| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars; ⌀ = nullable) |
|---|---|---|---|---|
| axinging/chromium-crosswalk | refs/heads/master | components/test/data/autofill/merge/tools/reserialize_profiles_from_query.py | 162 | (content below) |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from autofill_merge_common import SerializeProfiles, ColumnNameToFieldType
def main():
"""Serializes the output of the query 'SELECT * from autofill_profiles;'.
"""
COLUMNS = ['GUID', 'LABEL', 'FIRST_NAME', 'MIDDLE_NAME', 'LAST_NAME', 'EMAIL',
'COMPANY_NAME', 'ADDRESS_LINE_1', 'ADDRESS_LINE_2', 'CITY',
'STATE', 'ZIPCODE', 'COUNTRY', 'PHONE', 'DATE_MODIFIED']
if len(sys.argv) != 2:
print ("Usage: python reserialize_profiles_from_query.py "
"<path/to/serialized_profiles>")
return
types = [ColumnNameToFieldType(column_name) for column_name in COLUMNS]
profiles = []
with open(sys.argv[1], 'r') as serialized_profiles:
for line in serialized_profiles:
# trim the newline if present
if line[-1] == '\n':
line = line[:-1]
values = line.split("|")
profiles.append(zip(types, values))
print(SerializeProfiles(profiles))
return 0
if __name__ == '__main__':
sys.exit(main())
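# Hedged usage sketch (values below are hypothetical, not from the source):
# the expected input is one pipe-delimited `autofill_profiles` row per line,
# fields in COLUMNS order, e.g.
#   some-guid|home|John|Q|Doe|jdoe@example.com|Acme|1 Main St||Springfield|IL|62704|US|5551234|1365000000
# Each line is split on '|' and paired with its field type via
# zip(types, values) before SerializeProfiles() renders every profile as one
# string.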
| omerhasan/namebench | refs/heads/master | nb_third_party/graphy/bar_chart.py | 233 | (content below) |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to bar charts."""
import copy
import warnings
from graphy import common
from graphy import util
class BarsStyle(object):
"""Style of a series of bars in a BarChart
Object Attributes:
color: Hex string, like '00ff00' for green
"""
def __init__(self, color):
self.color = color
class BarChartStyle(object):
"""Represents the style for bars on a BarChart.
Any of the object attributes may be set to None, in which case the
value will be auto-calculated.
Object Attributes:
bar_thickness: The thickness of a bar, in pixels.
bar_gap: The gap between bars, in pixels, or as a fraction of bar thickness
if use_fractional_gap_spacing is True.
group_gap: The gap between groups of bars, in pixels, or as a fraction of
bar thickness if use_fractional_gap_spacing is True.
use_fractional_gap_spacing: if True, bar_gap and group_gap specify gap
sizes as a fraction of bar width. Default is False.
"""
_DEFAULT_GROUP_GAP = 8
_DEFAULT_BAR_GAP = 4
def __init__(self, bar_thickness=None,
bar_gap=_DEFAULT_BAR_GAP, group_gap=_DEFAULT_GROUP_GAP,
use_fractional_gap_spacing=False):
"""Create a new BarChartStyle.
Args:
bar_thickness: The thickness of a bar, in pixels. Set this to None if
you want the bar thickness to be auto-calculated (this is the default
behaviour).
bar_gap: The gap between bars, in pixels. Default is 4.
group_gap: The gap between groups of bars, in pixels. Default is 8.
"""
self.bar_thickness = bar_thickness
self.bar_gap = bar_gap
self.group_gap = group_gap
self.use_fractional_gap_spacing = use_fractional_gap_spacing
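# Hedged usage sketch (illustrative, not part of the original module): with
# fractional spacing, gaps are expressed relative to bar thickness, so
#   style = BarChartStyle(bar_thickness=10, bar_gap=0.5, group_gap=1.5,
#                         use_fractional_gap_spacing=True)
# would mean 10px-thick bars with 5px gaps inside a group and 15px between
# groups.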
class BarStyle(BarChartStyle):
def __init__(self, *args, **kwargs):
warnings.warn('BarStyle is deprecated. Use BarChartStyle.',
DeprecationWarning, stacklevel=2)
super(BarStyle, self).__init__(*args, **kwargs)
class BarChart(common.BaseChart):
"""Represents a bar chart.
Object attributes:
vertical: if True, the bars will be vertical. Default is True.
stacked: if True, the bars will be stacked. Default is False.
style: The BarChartStyle for all bars on this chart, specifying bar
thickness and gaps between bars.
"""
def __init__(self, points=None):
"""Constructor for BarChart objects."""
super(BarChart, self).__init__()
if points is not None:
self.AddBars(points)
self.vertical = True
self.stacked = False
self.style = BarChartStyle(None, None, None) # full auto
def AddBars(self, points, label=None, color=None):
"""Add a series of bars to the chart.
points: List of y-values for the bars in this series
label: Name of the series (used in the legend)
color: Hex string, like '00ff00' for green
This is a convenience method which constructs & appends the DataSeries for
you.
"""
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! '
'Label is a hex triplet. Maybe it is a color? The '
'old argument order (color before label) is deprecated.',
DeprecationWarning, stacklevel=2)
style = BarsStyle(color)
series = common.DataSeries(points, label=label, style=style)
self.data.append(series)
return series
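# Hedged usage sketch (values are hypothetical):
#   chart = BarChart()
#   chart.AddBars([1, 2, 3], label='apples', color='ff0000')
#   chart.AddBars([4, 5, 6], label='oranges', color='00ff00')
# Each call wraps the points in a BarsStyle and appends one common.DataSeries.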
def GetDependentAxes(self):
"""Get the dependendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
else:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
def GetIndependentAxes(self):
"""Get the independendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
else:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
def GetDependentAxis(self):
"""Get the main dependendant axis, which depends on orientation."""
if self.vertical:
return self.left
else:
return self.bottom
def GetIndependentAxis(self):
"""Get the main independendant axis, which depends on orientation."""
if self.vertical:
return self.bottom
else:
return self.left
def GetMinMaxValues(self):
"""Get the largest & smallest bar values as (min_value, max_value)."""
if not self.stacked:
return super(BarChart, self).GetMinMaxValues()
if not self.data:
return None, None # No data, nothing to do.
num_bars = max(len(series.data) for series in self.data)
positives = [0] * num_bars
negatives = list(positives)
for series in self.data:
for i, point in enumerate(series.data):
if point:
if point > 0:
positives[i] += point
else:
negatives[i] += point
min_value = min(min(positives), min(negatives))
max_value = max(max(positives), max(negatives))
return min_value, max_value
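# Hedged worked example for the stacked branch above (not in the original
# source): with two series [1, -2, 3] and [2, -3, 1], the per-bar sums are
#   positives = [3, 0, 4] and negatives = [0, -5, 0]
# so GetMinMaxValues() returns (-5, 4): stacked extents, not raw point values.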
| glciampaglia/pymulator | refs/heads/master | pymulator/cmdline.py | 1 | (content below) |
""" Main console scripts entry point. """
import argparse
import copy
from . import core
from . import sim
commands = ['simulate', 'createsim', 'showsim', 'plot']
def simulate(args):
"""run a simulation"""
s = sim.Sim(args.sim_path)
df = core.simulate(s)
if args.reproduce:
if not hasattr(s, 'results'):
raise ValueError("No results in {}".format(args.sim_path))
if s.results.equals(df):  # assumes pandas objects; all(...) over a frame comparison is unreliable
print("*** Simulation reproduced: OK ***")
else:
if args.out_path is None:
args.out_path = args.sim_path + '.fail'
s2 = copy.copy(s)
s2.results = df
s2.dump(args.out_path)
print("*** Simulation reproduced: FAIL ***")
print("Results written to: {}".format(args.out_path))
else:
s.results = df
if args.out_path is not None:
path = args.out_path
else:
path = args.sim_path
s.dump(path)
print("Results written to: {}".format(path))
def createsim(args):
"""create a new simulation"""
print("Command not implemented!")
def showsim(args):
"""show simulation details"""
print("Command not implemented!")
def plot(args):
"""plot simulation results"""
print("Command not implemented!")
### Parsers
def simulate_parser(parser):
parser.add_argument('sim_path',
metavar='simulation',
help='path to simulation file')
parser.add_argument('-r',
'--reproduce',
action='store_true',
help='re-simulate and check results are the same')
parser.add_argument('-o',
'--output',
dest='out_path',
metavar='path',
help='write simulation to %(metavar)s')
def createsim_parser(parser):
pass
def showsim_parser(parser):
pass
def plot_parser(parser):
pass
def main():
__globals__ = globals()
descr = "reproducible simulation for lazy Python programmers"
parser = argparse.ArgumentParser(description=descr)
subparsers = parser.add_subparsers()
for cmd in commands:
cmdf = __globals__[cmd]
subp = subparsers.add_parser(cmd, help=cmdf.__doc__)
__globals__[cmd + '_parser'](subp)
subp.set_defaults(func=cmdf)
args = parser.parse_args()
if 'func' in args:
args.func(args)
else:
parser.error("please specify at least one command")
| SungJinYoo/BookShare | refs/heads/master | bookshare/wsgi.py | 1 | (content below) |
"""
WSGI config for bookshare project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bookshare.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
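# Hedged deployment sketch (the server choice is an assumption, not part of
# this file): any WSGI server can serve the `application` callable, e.g.
#   $ gunicorn bookshare.wsgi:application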
| ManushB/Flask-Blog | refs/heads/master | app/forms.py | 22 | (content below) |
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
class LoginForm(Form):
openid = StringField('openid', validators=[DataRequired()])
remember_me = BooleanField('remember_me', default=False)
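# Hedged usage sketch (the surrounding view function is hypothetical):
#   form = LoginForm()
#   if form.validate_on_submit():   # True only on POST with a non-empty openid
#       remember = form.remember_me.data
# DataRequired() makes validation fail whenever the openid field is empty.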
| JackieXie168/rethinkdb | refs/heads/next | test/common/http_support/flask/app.py | 427 | (content below) |
# -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package
from . import json
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# a lock used for logger initialization
_logger_lock = Lock()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
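# Hedged illustration of the guard above: calling a @setupmethod-decorated
# method such as app.add_url_rule(...) after the first request, with
# app.debug set, raises the AssertionError instead of silently registering
# the route too late.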
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an `__init__.py` file inside) or a standard module (just a `.py` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the `__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea what
belongs to your application. This name is used to find resources
on the file system, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in `yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to `True` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on expected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is now application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
# Backwards compatibility support
def _get_request_globals_class(self):
return self.app_ctx_globals_class
def _set_request_globals_class(self, value):
from warnings import warn
warn(DeprecationWarning('request_globals_class attribute is now '
'called app_ctx_globals_class'))
self.app_ctx_globals_class = value
request_globals_class = property(_get_request_globals_class,
_set_request_globals_class)
del _get_request_globals_class, _set_request_globals_class
#: The debug flag. Set this to `True` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the `DEBUG`
#: configuration key. Defaults to `False`.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to `True` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: `TESTING` configuration key. Defaults to `False`.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: `SECRET_KEY` configuration key. Defaults to `None`.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: `USE_X_SENDFILE` configuration key. Defaults to `False`.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: Enable the deprecated module support? This is active by default
#: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules
#: will be removed in favor of Blueprints.
enable_modules = True
#: The logging format used for the debug logger. This is only used when
#: the application is in debug mode, otherwise the attached logging
#: handler does the formatting.
#:
#: .. versionadded:: 0.3
debug_log_format = (
'-' * 80 + '\n' +
'%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80
)
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': False,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
})
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client class that is used when `test_client` is called.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
def __init__(self, import_name, static_path=None, static_url_path=None,
static_folder='static', template_folder='templates',
instance_path=None, instance_relative_config=False):
_PackageBoundObject.__init__(self, import_name,
template_folder=template_folder)
if static_path is not None:
from warnings import warn
warn(DeprecationWarning('static_path is now called '
'static_url_path'), stacklevel=2)
static_url_path = static_path
if static_url_path is not None:
self.static_url_path = static_url_path
if static_folder is not None:
self.static_folder = static_folder
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError('If an instance path is provided it must be '
'absolute. A relative path was given instead.')
#: Holds the path to the instance folder.
#:
#: .. versionadded:: 0.8
self.instance_path = instance_path
#: The configuration dictionary as :class:`Config`. This behaves
#: exactly like a regular dictionary but supports additional methods
#: to load a config from files.
self.config = self.make_config(instance_relative_config)
# Prepare the deferred setup of the logger.
self._logger = None
self.logger_name = self.import_name
#: A dictionary of all view functions registered. The keys will
#: be function names which are also used to generate URLs and
#: the values are the function objects themselves.
#: To register a view function, use the :meth:`route` decorator.
self.view_functions = {}
# support for the now deprecated `error_handlers` attribute. The
# :attr:`error_handler_spec` shall be used now.
self._error_handlers = {}
#: A dictionary of all registered error handlers. The key is `None`
#: for error handlers active on the application, otherwise the key is
#: the name of the blueprint. Each key points to another dictionary
#: where the key is the status code of the http exception. The
#: special key `None` points to a list of tuples where the first item
#: is the class for the instance check and the second the error handler
#: function.
#:
#: To register an error handler, use the :meth:`errorhandler`
#: decorator.
self.error_handler_spec = {None: self._error_handlers}
#: A list of functions that are called when :meth:`url_for` raises a
#: :exc:`~werkzeug.routing.BuildError`. Each function registered here
#: is called with `error`, `endpoint` and `values`. If a function
#: returns `None` or raises a `BuildError` the next function is
#: tried.
#:
#: .. versionadded:: 0.9
self.url_build_error_handlers = []
#: A dictionary with lists of functions that should be called at the
#: beginning of the request. The key of the dictionary is the name of
#: the blueprint this function is active for, `None` for all requests.
#: This can for example be used to open database connections or
#: to get hold of the currently logged-in user. To register a
#: function here, use the :meth:`before_request` decorator.
self.before_request_funcs = {}
#: A lists of functions that should be called at the beginning of the
#: first request to this instance. To register a function here, use
#: the :meth:`before_first_request` decorator.
#:
#: .. versionadded:: 0.8
self.before_first_request_funcs = []
#: A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the blueprint
#: this function is active for, `None` for all requests. This can for
#: example be used to open database connections or to get hold of the
#: currently logged-in user. To register a function here, use the
#: :meth:`after_request` decorator.
self.after_request_funcs = {}
#: A dictionary with lists of functions that are called after
#: each request, even if an exception has occurred. The key of the
#: dictionary is the name of the blueprint this function is active for,
#: `None` for all requests. These functions are not allowed to modify
#: the request, and their return values are ignored. If an exception
#: occurred while processing the request, it gets passed to each
#: teardown_request function. To register a function here, use the
#: :meth:`teardown_request` decorator.
#:
#: .. versionadded:: 0.7
self.teardown_request_funcs = {}
#: A list of functions that are called when the application context
#: is destroyed. Since the application context is also torn down
#: if the request ends this is the place to store code that disconnects
#: from databases.
#:
#: .. versionadded:: 0.9
self.teardown_appcontext_funcs = []
#: A dictionary with lists of functions that can be used as URL
#: value processor functions. Whenever a URL is built these functions
#: are called to modify the dictionary of values in place. The key
#: `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#:
#: .. versionadded:: 0.7
self.url_value_preprocessors = {}
#: A dictionary with lists of functions that can be used as URL value
#: preprocessors. The key `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#: of URL values before they are used as the keyword arguments of the
#: view function. For each function registered this one should also
#: provide a :meth:`url_defaults` function that adds the parameters
#: automatically again that were removed that way.
#:
#: .. versionadded:: 0.7
self.url_default_functions = {}
#: A dictionary with list of functions that are called without argument
#: to populate the template context. The key of the dictionary is the
#: name of the blueprint this function is active for, `None` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
#: all the attached blueprints in a dictionary by name. Blueprints
#: can be attached multiple times so this dictionary does not tell
#: you how often they got attached.
#:
#: .. versionadded:: 0.7
self.blueprints = {}
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
#: similar things. For backwards compatibility extensions should register
#: themselves like this::
#:
#: if not hasattr(app, 'extensions'):
#: app.extensions = {}
#: app.extensions['extensionname'] = SomeObject()
#:
#: The key must match the name of the `flaskext` module. For example in
#: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
self.extensions = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug.routing import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(BaseConverter.to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = Map()
# tracks internally if the application already handled at least one
# request.
self._got_first_request = False
self._before_request_lock = Lock()
# register the static folder for the application. Do that even
# if the folder does not exist. First of all it might be created
# while the server is running (usually happens during development)
# but also because google appengine stores static files somewhere
# else when mapped with the .yml file.
if self.has_static_folder:
self.add_url_rule(self.static_url_path + '/<path:filename>',
endpoint='static',
view_func=self.send_static_file)
def _get_error_handlers(self):
from warnings import warn
warn(DeprecationWarning('error_handlers is deprecated, use the '
'new error_handler_spec attribute instead.'), stacklevel=1)
return self._error_handlers
def _set_error_handlers(self, value):
self._error_handlers = value
self.error_handler_spec[None] = value
error_handlers = property(_get_error_handlers, _set_error_handlers)
del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is '__main__'. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION`
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@property
def logger(self):
"""A :class:`logging.Logger` object for this application. The
default configuration is to log to stderr if the application is
in debug mode. This logger can be used to (surprise) log messages.
Here are some examples::
app.logger.debug('A value for debugging')
app.logger.warning('A warning occurred (%d apples)', 42)
app.logger.error('An error occurred')
.. versionadded:: 0.3
"""
if self._logger and self._logger.name == self.logger_name:
return self._logger
with _logger_lock:
if self._logger and self._logger.name == self.logger_name:
return self._logger
from flask.logging import create_logger
self._logger = rv = create_logger(self)
return rv
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
return self.create_jinja_environment()
@property
def got_first_request(self):
"""This attribute is set to `True` if the application started
handling the first request.
.. versionadded:: 0.8
"""
return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return Config(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
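# Hedged example of the lookup above: for an uninstalled package located at
# /src/myapp, find_package() yields no prefix and this returns
# /src/myapp/instance; for a package installed under a prefix such as /usr,
# it returns /usr/var/<app name>-instance.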
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
rv = Environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
rv.filters['tojson'] = json.tojson_filter
return rv
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader while keeping the rest unchanged. Overriding
this function is discouraged; override the :meth:`jinja_loader`
function instead.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def init_jinja_globals(self):
"""Deprecated. Used to initialize the Jinja2 globals.
.. versionadded:: 0.5
.. versionchanged:: 0.7
This method is deprecated with 0.7. Override
:meth:`create_jinja_environment` instead.
"""
def select_jinja_autoescape(self, filename):
"""Returns `True` if autoescaping should be active for the given
template name.
.. versionadded:: 0.5
"""
if filename is None:
return False
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
to inject. Note that, as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
funcs = self.template_context_processors[None]
reqctx = _request_ctx_stack.top
if reqctx is not None:
bp = reqctx.request.blueprint
if bp is not None and bp in self.template_context_processors:
funcs = chain(funcs, self.template_context_processors[bp])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
# make sure the original values win. This makes it easier to add
# new variables in context processors without breaking existing
# views.
context.update(orig_ctx)
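# Hedged illustration of "original values win": if the caller already
# passed title='Home' into the context, a context processor returning
# {'title': 'Other'} cannot override it, because orig_ctx is re-applied
# last.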
def run(self, host=None, port=None, debug=None, **options):
"""Runs the application on a local development server. If the
:attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to `True` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME`` variable.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if
present.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information.
"""
from werkzeug.serving import run_simple
if host is None:
host = '127.0.0.1'
if port is None:
server_name = self.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
# shut down normally. This makes it possible to restart the server,
# without the reloader and related machinery, from an interactive shell.
self._got_first_request = False
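# Hedged worked example of the port resolution above: with
# app.config['SERVER_NAME'] = 'example.com:8080' and no explicit `port`
# argument, run() serves on port 8080; with SERVER_NAME unset it falls back
# to 127.0.0.1:5000.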
def test_client(self, use_cookies=True):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for `with` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor of the blueprint
system.
"""
assert blueprint_is_module(module), 'register_module requires ' \
'actual module objects. Please upgrade to blueprints though.'
if not self.enable_modules:
raise RuntimeError('Module support was disabled but code '
'attempted to register a module named %r' % module)
else:
from warnings import warn
warn(DeprecationWarning('Modules are deprecated. Upgrade to '
'using blueprints. Have a look into the documentation for '
'more information. If this module was registered by a '
'Flask-Extension upgrade the extension or contact the author '
'of that extension instead. (Registered %r)' % module),
stacklevel=2)
self.register_blueprint(module, **options)
@setupmethod
def register_blueprint(self, blueprint, **options):
"""Registers a blueprint on the application.
.. versionadded:: 0.7
"""
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
to customize the behavior via subclassing you only need to change
this method.
For more information refer to :ref:`url-route-registrations`.
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
`OPTIONS` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A difference from
Werkzeug is the handling of method options: `methods` is a list
of methods this rule should be limited to (`GET`, `POST` etc.).
By default a rule just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly added and
handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only `GET` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
methods = set(methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
provide_automatic_options = True
required_methods.add('OPTIONS')
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
# due to a werkzeug bug we need to make sure that the defaults are
# None if they are an empty dictionary. This should not be necessary
# with Werkzeug 0.7
options['defaults'] = options.get('defaults') or None
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func != view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
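# Hedged worked example of the automatic-OPTIONS logic above: a view
# registered with no explicit methods ends up with methods == {'GET',
# 'OPTIONS'} and provide_automatic_options == True; setting
# view_func.provide_automatic_options = False keeps the rule at {'GET'}
# and disables the automatic OPTIONS reply.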
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A difference from
Werkzeug is the handling of method options: `methods` is a list
of methods this rule should be limited to (`GET`, `POST` etc.).
By default a rule just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly added and
handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@setupmethod
def errorhandler(self, code_or_exception):
"""A decorator that is used to register a function give a given
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
You can also register a function as error handler without using
the :meth:`errorhandler` decorator. The following example is
equivalent to the one above::
def page_not_found(error):
return 'This page does not exist', 404
app.error_handler_spec[None][404] = page_not_found
Setting error handlers via assignments to :attr:`error_handler_spec`
however is discouraged as it requires fiddling with nested dictionaries
and the special case for arbitrary exception types.
The first `None` refers to the active blueprint. If the error
handler should be application wide `None` shall be used.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
:param code: the code as integer for the handler
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non-decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
if isinstance(code_or_exception, HTTPException):
code_or_exception = code_or_exception.code
if isinstance(code_or_exception, integer_types):
assert code_or_exception != 500 or key is None, \
'It is currently not possible to register a 500 internal ' \
'server error on a per-blueprint level.'
self.error_handler_spec.setdefault(key, {})[code_or_exception] = f
else:
self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \
.append((code_or_exception, f))
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request."""
self.before_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request. Your function
must take one parameter, a :attr:`response_class` object and return
a new response object or the same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
are executed when the request context is popped, even if an actual
request was not performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Generally, teardown functions must take every necessary step to avoid
failing. If they execute code that might fail, they have to surround
that code with try/except statements and log any errors that occur.
When a teardown function was called because of an exception it will
be passed an error object.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_appcontext(self, f):
"""Registers a function to be called when the application context
ends. These functions are typically also called when the request
context is popped.
Example::
ctx = app.app_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the app context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Since a request context typically also manages an application
context it would also be called when you pop a request context.
When a teardown function was called because of an exception it will
be passed an error object.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
handlers = self.error_handler_spec.get(request.blueprint)
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
if handlers and e.code in handlers:
handler = handlers[e.code]
else:
handler = self.error_handler_spec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return `False` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It
also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.
This is called for all HTTP exceptions raised by a view function.
If it returns `True` for any exception the error handler for this
exception is not called and it shows up as a regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that should be
handled. A special case is
:class:`~werkzeug.exceptions.HTTPException`\s, which are forwarded by
this function to the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
reraise(exc_type, exc_value, tb)
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
handler = self.error_handler_spec[None].get(500)
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
reraise(exc_type, exc_value, tb)
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return handler(e)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
response = self.make_response(rv)
response = self.process_response(response)
request_finished.send(self, response=response)
return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` and only exactly once per
application instance (which means process usually).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
self._got_first_request = True
for func in self.before_first_request_funcs:
func()
def make_default_options_response(self):
"""This method is called to create the default `OPTIONS` response.
This can be overridden through subclassing to change the default
behavior of `OPTIONS` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns `True` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
def make_response(self, rv):
"""Converts the return value from a view function to a real
response object that is an instance of :attr:`response_class`.
The following types are allowed for `rv`:
.. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
======================= ===========================================
:attr:`response_class` the object is returned unchanged
:class:`str` a response object is created with the
string as body
:class:`unicode` a response object is created with the
string encoded to utf-8 as body
a WSGI function the function is called as WSGI application
and buffered as response object
:class:`tuple` A tuple in the form ``(response, status,
headers)`` where `response` is any of the
types defined here, `status` is a string
or an integer and `headers` is a list or
a dictionary with header values.
======================= ===========================================
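Example of the tuple form (a minimal sketch; the route and header
are illustrative)::

    @app.route('/created')
    def created():
        return 'Created', 201, {'X-Example': 'yes'}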
:param rv: the return value from the view function
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('View function did not return a response')
if not isinstance(rv, self.response_class):
# When we create a response object directly, we let the constructor
# set the headers and status. We do this because there can be
# some extra logic involved when creating these objects with
# specific values (like default content type selection).
if isinstance(rv, (text_type, bytes, bytearray)):
rv = self.response_class(rv, headers=headers, status=status)
headers = status = None
else:
rv = self.response_class.force_type(rv, request.environ)
if status is not None:
if isinstance(status, string_types):
rv.status = status
else:
rv.status_code = status
if headers:
rv.headers.extend(headers)
return rv
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
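For the request-less case the server name has to be configured,
for example (an illustrative sketch)::

    app.config['SERVER_NAME'] = 'example.com'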
"""
if request is not None:
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
def handle_url_build_error(self, error, endpoint, values):
"""Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
"""
exc_type, exc_value, tb = sys.exc_info()
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
if rv is not None:
return rv
except BuildError as error:
pass
# At this point we want to reraise the exception. If the error is
# still the same one we can reraise it with the original traceback,
# otherwise we raise it from here.
if error is exc_value:
reraise(exc_type, exc_value, tb)
raise error
def preprocess_request(self):
"""Called before the actual request dispatching and will
call every as :meth:`before_request` decorated function.
If any of these function returns a value it's handled as
if it was the return value from the view and further
request handling is stopped.
This also triggers the :meth:`url_value_processor` functions before
the actual :meth:`before_request` functions are called.
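Example of a short-circuiting function (an illustrative sketch;
the ``login`` endpoint is hypothetical)::

    @app.before_request
    def require_login():
        if 'user_id' not in session:
            return redirect(url_for('login'))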
"""
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the functions decorated with :meth:`after_request`.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same one; it has to be an
instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
funcs = ctx._after_request_functions
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.save_session(ctx.session, response)
return response
def do_teardown_request(self, exc=None):
"""Called after the actual request dispatching and will
call every as :meth:`teardown_request` decorated function. This is
not actually called by the :class:`Flask` object itself but is always
triggered when the request context is popped. That way we have a
tighter control over certain resources under testing environments.
.. versionchanged:: 0.9
Added the `exc` argument. Previously this was always using the
current exception information.
"""
if exc is None:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
func(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=None):
"""Called when an application context is popped. This works pretty
much the same as :meth:`do_teardown_request` but for the application
context.
.. versionadded:: 0.9
"""
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
"""Binds the application only. For as long as the application is bound
to the current context the :data:`flask.current_app` points to that
application. An application context is automatically created when a
request context is pushed if necessary.
Example usage::
with app.app_context():
...
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
combination with the `with` statement because the request is only bound
to the current context for the duration of the `with` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
The object returned can also be used without the `with` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
ctx = app.request_context(environ)
ctx.push()
try:
do_something_with(request)
finally:
ctx.pop()
.. versionchanged:: 0.3
Added support for usage without the `with` statement and the
`with` statement is now passed the ctx object.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
:func:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
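Example usage (a minimal sketch)::

    with app.test_request_context('/?name=Peter'):
        assert request.args['name'] == 'Peter'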
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
`__call__` so that middlewares can be applied without losing a
reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
The behavior of the before and after request callbacks was changed
under error conditions and a new callback was added that will
always execute at the end of the request, independent of whether an
error occurred or not. See :ref:`callbacks-and-errors`.
:param environ: a WSGI environment
:param start_response: a callable accepting a status code,
a list of headers and an optional
exception context to start the response
"""
ctx = self.request_context(environ)
ctx.push()
error = None
try:
try:
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.make_response(self.handle_exception(e))
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
@property
def modules(self):
from warnings import warn
warn(DeprecationWarning('Flask.modules is deprecated, use '
'Flask.blueprints instead'), stacklevel=2)
return self.blueprints
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
|
ammarkhann/FinalSeniorCode
|
refs/heads/master
|
lib/python2.7/site-packages/scipy/signal/__init__.py
|
12
|
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response from TF coefficients.
freqs_zpk -- Analog filter frequency response from ZPK coefficients.
freqz -- Digital filter frequency response from TF coefficients.
freqz_zpk -- Digital filter frequency response from ZPK coefficients.
sosfreqz -- Digital filter frequency response for SOS format filter.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
minimum_phase -- Convert a linear phase FIR filter to minimum phase.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
bessel -- Bessel (no order selection available -- try buttord)
iirnotch -- Design second-order IIR notch digital filter.
iirpeak -- Design second-order IIR peak (resonant) digital filter.
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
freqresp -- frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
dfreqresp -- frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
ss2tf -- state-space to transfer function.
ss2zpk -- state-space to zero-pole-gain.
sos2zpk -- second-order sections to zero-pole-gain.
sos2tf -- second-order sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
unit_impulse -- Discrete unit impulse
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
ricker -- return ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
find_peaks_cwt -- Attempt to find the peaks in the given 1-D array
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
stft -- Compute the Short Time Fourier Transform
istft -- Compute the Inverse Short Time Fourier Transform
check_COLA -- Check the COLA constraint for iSTFT reconstruction
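Example
=======
A minimal sketch of a common workflow -- design a low-pass Butterworth
filter and apply it with zero-phase filtering (the order and cutoff
below are illustrative choices, not recommendations)::

    import numpy as np
    from scipy import signal

    t = np.linspace(0, 1, 500)
    x = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(t.size)
    b, a = signal.butter(4, 0.125)  # 4th order, cutoff at 0.125 * Nyquist
    y = signal.filtfilt(b, a, x)    # forward-backward (zero-phase) filter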
"""
from __future__ import division, print_function, absolute_import
from . import sigtools
from .waveforms import *
from ._max_len_seq import max_len_seq
from ._upfirdn import upfirdn
# The spline module (a C extension) provides:
# cspline2d, qspline2d, sepfir2d, symiirorder1, symiirorder2
from .spline import *
from .bsplines import *
from .filter_design import *
from .fir_filter_design import *
from .ltisys import *
from .lti_conversion import *
from .windows import *
from .signaltools import *
from ._savitzky_golay import savgol_coeffs, savgol_filter
from .spectral import *
from .wavelets import *
from ._peak_finding import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
|
josephpage/compose
|
refs/heads/master
|
tests/unit/cli/verbose_proxy_test.py
|
57
|
from __future__ import unicode_literals
from __future__ import absolute_import
from tests import unittest
from compose.cli import verbose_proxy
class VerboseProxyTestCase(unittest.TestCase):
def test_format_call(self):
expected = "(u'arg1', True, key=u'value')"
actual = verbose_proxy.format_call(
("arg1", True),
{'key': 'value'})
self.assertEqual(expected, actual)
def test_format_return_sequence(self):
expected = "(list with 10 items)"
actual = verbose_proxy.format_return(list(range(10)), 2)
self.assertEqual(expected, actual)
def test_format_return(self):
expected = "{u'Id': u'ok'}"
actual = verbose_proxy.format_return({'Id': 'ok'}, 2)
self.assertEqual(expected, actual)
def test_format_return_no_result(self):
actual = verbose_proxy.format_return(None, 2)
self.assertEqual(None, actual)
|
saumishr/django
|
refs/heads/master
|
tests/modeltests/defer/__init__.py
|
12133432
| |
listyque/TACTIC-Handler
|
refs/heads/stable
|
thlib/side/natsort/compat/__init__.py
|
12133432
| |
pyecs/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/filters/__init__.py
|
12133432
| |
libracore/erpnext
|
refs/heads/v12
|
erpnext/communication/doctype/call_log/__init__.py
|
12133432
| |
scipy/scipy
|
refs/heads/master
|
scipy/interpolate/tests/test_fitpack2.py
|
12
|
# Created by Pearu Peterson, June 2003
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, suppress_warnings)
from pytest import raises as assert_raises
from numpy import array, diff, linspace, meshgrid, ones, pi, shape
from scipy.interpolate.fitpack import bisplrep, bisplev
from scipy.interpolate.fitpack2 import (UnivariateSpline,
LSQUnivariateSpline, InterpolatedUnivariateSpline,
LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
RectSphereBivariateSpline)
class TestUnivariateSpline:
def test_linear_constant(self):
x = [1,2,3]
y = [3,3,3]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
def test_preserve_shape(self):
x = [1, 2, 3]
y = [0, 2, 4]
lut = UnivariateSpline(x, y, k=1)
arg = 2
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
arg = [1.5, 2, 2.5]
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
def test_linear_1d(self):
x = [1,2,3]
y = [0,2,4]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[0,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
def test_subclassing(self):
# See #731
class ZeroSpline(UnivariateSpline):
def __call__(self, x):
return 0*array(x)
sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
assert_array_equal(sp([1.5, 2.5]), [0., 0.])
def test_empty_input(self):
# Test whether empty input returns an empty output. Ticket 1014
x = [1,3,5,7,9]
y = [0,4,9,12,21]
spl = UnivariateSpline(x, y, k=3)
assert_array_equal(spl([]), array([]))
def test_resize_regression(self):
"""Regression test for #1375."""
x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
-0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
0.65016502, 1.]
y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
0.62928599, 1.]
w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
1.00000000e+12]
spl = UnivariateSpline(x=x, y=y, w=w, s=None)
desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
def test_out_of_range_regression(self):
# Test different extrapolation modes. See ticket 3557
x = np.arange(5, dtype=float)
y = x**3
xp = linspace(-8, 13, 100)
xp_zeros = xp.copy()
xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
xp_clip = xp.copy()
xp_clip[xp_clip < x[0]] = x[0]
xp_clip[xp_clip > x[-1]] = x[-1]
for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
spl = cls(x=x, y=y)
for ext in [0, 'extrapolate']:
assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
for ext in [1, 'zeros']:
assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
for ext in [2, 'raise']:
assert_raises(ValueError, spl, xp, **dict(ext=ext))
for ext in [3, 'const']:
assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
# also test LSQUnivariateSpline [which needs explicit knots]
t = spl.get_knots()[3:4] # interior knots w/ default k=3
spl = LSQUnivariateSpline(x, y, t)
assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
assert_raises(ValueError, spl, xp, **dict(ext=2))
assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
# also make sure that unknown values for `ext` are caught early
for ext in [-1, 'unknown']:
spl = UnivariateSpline(x, y)
assert_raises(ValueError, spl, xp, **dict(ext=ext))
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, ext=ext))
def test_lsq_fpchec(self):
xs = np.arange(100) * 1.
ys = np.arange(100) * 1.
knots = np.linspace(0, 99, 10)
bbox = (-1, 101)
assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
bbox=bbox)
def test_derivative_and_antiderivative(self):
# Thin wrappers to splder/splantider, so light smoke test only.
x = np.linspace(0, 1, 70)**3
y = np.cos(x)
spl = UnivariateSpline(x, y, s=0)
spl2 = spl.antiderivative(2).derivative(2)
assert_allclose(spl(0.3), spl2(0.3))
spl2 = spl.antiderivative(1)
assert_allclose(spl2(0.6) - spl2(0.2),
spl.integral(0.2, 0.6))
def test_derivative_extrapolation(self):
# Regression test for gh-10195: for a const-extrapolation spline
# its derivative evaluates to zero for extrapolation
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 5]
f = UnivariateSpline(x_values, y_values, ext='const', k=3)
x = [-1, 0, -0.5, 9, 9.5, 10]
assert_allclose(f.derivative()(x), 0, atol=1e-15)
def test_integral_out_of_bounds(self):
# Regression test for gh-7906: .integral(a, b) is wrong if both
# a and b are out-of-bounds
x = np.linspace(0., 1., 7)
for ext in range(4):
f = UnivariateSpline(x, x, s=0, ext=ext)
for (a, b) in [(1, 1), (1, 5), (2, 5),
(0, 0), (-2, 0), (-2, -1)]:
assert_allclose(f.integral(a, b), 0, atol=1e-15)
def test_nan(self):
# bail out early if the input data contains nans
x = np.arange(10, dtype=float)
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(x, y, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
y_end = y[-1]
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, check_finite=True))
y[-1] = y_end # check valid y but invalid w
w[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
def test_strictly_increasing_x(self):
# Test that x is required to be strictly increasing for
# UnivariateSpline if s=0 and for InterpolatedUnivariateSpline,
# but merely increasing for UnivariateSpline if s>0
# and for LSQUnivariateSpline; see gh-8535
xx = np.arange(10, dtype=float)
yy = xx**3
x = np.arange(10, dtype=float)
x[1] = x[0]
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(xx, yy, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
UnivariateSpline(x=x, y=y, w=w, s=1, check_finite=True)
LSQUnivariateSpline(x=x, y=y, t=t, w=w, check_finite=True)
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, s=0, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
def test_increasing_x(self):
# Test that x is required to be increasing, see gh-8535
xx = np.arange(10, dtype=float)
yy = xx**3
x = np.arange(10, dtype=float)
x[1] = x[0] - 1.0
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(xx, yy, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
def test_invalid_input_for_univariate_spline(self):
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
UnivariateSpline(x_values, y_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [-1.0, 1.0, 1.0, 1.0]
UnivariateSpline(x_values, y_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
UnivariateSpline(x_values, y_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
UnivariateSpline(x_values, y_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
with assert_raises(ValueError) as info:
UnivariateSpline(x_values, y_values, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
def test_invalid_input_for_interpolated_univariate_spline(self):
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
InterpolatedUnivariateSpline(x_values, y_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [-1.0, 1.0, 1.0, 1.0]
InterpolatedUnivariateSpline(x_values, y_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
InterpolatedUnivariateSpline(x_values, y_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
InterpolatedUnivariateSpline(x_values, y_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
def test_invalid_input_for_lsq_univariate_spline(self):
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
spl = UnivariateSpline(x_values, y_values, check_finite=True)
t_values = spl.get_knots()[3:4] # interior knots w/ default k=3
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
LSQUnivariateSpline(x_values, y_values, t_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [1.0, 1.0, 1.0, 1.0]
LSQUnivariateSpline(x_values, y_values, t_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (100, -100)
LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
assert "Interior knots t must satisfy Schoenberg-Whitney conditions" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
LSQUnivariateSpline(x_values, y_values, t_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
def test_array_like_input(self):
x_values = np.array([1, 2, 4, 6, 8.5])
y_values = np.array([0.5, 0.8, 1.3, 2.5, 2.8])
w_values = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
bbox = np.array([-100, 100])
# np.array input
spl1 = UnivariateSpline(x=x_values, y=y_values, w=w_values,
bbox=bbox)
# list input
spl2 = UnivariateSpline(x=x_values.tolist(), y=y_values.tolist(),
w=w_values.tolist(), bbox=bbox.tolist())
assert_allclose(spl1([0.1, 0.5, 0.9, 0.99]),
spl2([0.1, 0.5, 0.9, 0.99]))
class TestLSQBivariateSpline:
# NOTE: The systems in this test class are rank-deficient
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
assert_equal(len(r), 1)
assert_almost_equal(lut(2,2), 3.)
def test_bilinearity(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,7,8,3,4,7,1,3,4]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
# This seems to fail (ier=1, see ticket 1642).
sup.filter(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
tx, ty = lut.get_knots()
for xa, xb in zip(tx[:-1], tx[1:]):
for ya, yb in zip(ty[:-1], ty[1:]):
for t in [0.1, 0.5, 0.9]:
for s in [0.3, 0.4, 0.7]:
xp = xa*(1-t) + xb*t
yp = ya*(1-s) + yb*s
zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ lut(xb, ya)*t*(1-s)
+ lut(xa, yb)*(1-t)*s
+ lut(xb, yb)*t*s)
assert_almost_equal(lut(xp,yp), zp)
def test_integral(self):
x = [1,1,1,2,2,2,8,8,8]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
assert_equal(len(r), 1)
tx, ty = lut.get_knots()
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]),
trpz)
def test_empty_input(self):
# Test whether empty inputs return an empty output. Ticket 1014
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
assert_equal(len(r), 1)
assert_array_equal(lut([], []), np.zeros((0,0)))
assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
def test_invalid_input(self):
s = 0.1
tx = [1 + s, 3 - s]
ty = [1 + s, 3 - s]
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0, num=10)
LSQBivariateSpline(x, y, z, tx, ty)
assert "x, y, and z should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0)
w = np.linspace(1.0, 10.0, num=20)
LSQBivariateSpline(x, y, z, tx, ty, w=w)
assert "x, y, z, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
w = np.linspace(-1.0, 10.0)
LSQBivariateSpline(x, y, z, tx, ty, w=w)
assert "w should be positive" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-100, 100, -100)
LSQBivariateSpline(x, y, z, tx, ty, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
LSQBivariateSpline(x, y, z, tx, ty, kx=10, ky=10)
assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in \
str(info.value)
with assert_raises(ValueError) as exc_info:
LSQBivariateSpline(x, y, z, tx, ty, eps=0.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
LSQBivariateSpline(x, y, z, tx, ty, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
s = 0.1
tx = np.array([1 + s, 3 - s])
ty = np.array([1 + s, 3 - s])
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0)
w = np.linspace(1.0, 10.0)
bbox = np.array([1.0, 10.0, 1.0, 10.0])
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
# np.array input
spl1 = LSQBivariateSpline(x, y, z, tx, ty, w=w, bbox=bbox)
# list input
spl2 = LSQBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
tx.tolist(), ty.tolist(), w=w.tolist(),
bbox=bbox)
assert_allclose(spl1(2.0, 2.0), spl2(2.0, 2.0))
assert_equal(len(r), 2)
def test_unequal_length_of_knots(self):
"""Test for the case when the input knot-location arrays in x and y are
of different lengths.
"""
x, y = np.mgrid[0:100, 0:100]
x = x.ravel()
y = y.ravel()
z = 3.0 * np.ones_like(x)
tx = np.linspace(0.1, 98.0, 29)
ty = np.linspace(0.1, 98.0, 33)
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty)
assert_equal(len(r), 1)
assert_almost_equal(lut(x, y, grid=False), z)
class TestSmoothBivariateSpline:
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
def test_linear_1d(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,0,0,2,2,2,4,4,4]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
def test_integral(self):
x = [1,1,1,2,2,2,4,4,4]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
with suppress_warnings() as sup:
# This seems to fail (ier=1, see ticket 1642).
sup.filter(UserWarning, "\nThe required storage space")
lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
tx = [1,2,4]
ty = [1,2,3]
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
decimal=0) # the quadratures give 23.75 and 23.85
tz = lut(tx[:-1], ty[:-1])
trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
def test_rerun_lwrk2_too_small(self):
# in this setting, lwrk2 is too small in the default run. Here we
# check for equality with the bisplrep/bisplev output because there,
# an automatic re-run of the spline representation is done if ier>10.
x = np.linspace(-2, 2, 80)
y = np.linspace(-2, 2, 80)
z = x + y
xi = np.linspace(-1, 1, 100)
yi = np.linspace(-2, 2, 100)
tck = bisplrep(x, y, z)
res1 = bisplev(xi, yi, tck)
interp_ = SmoothBivariateSpline(x, y, z)
res2 = interp_(xi, yi)
assert_almost_equal(res1, res2)
def test_invalid_input(self):
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0, num=10)
SmoothBivariateSpline(x, y, z)
assert "x, y, and z should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0)
w = np.linspace(1.0, 10.0, num=20)
SmoothBivariateSpline(x, y, z, w=w)
assert "x, y, z, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
w = np.linspace(-1.0, 10.0)
SmoothBivariateSpline(x, y, z, w=w)
assert "w should be positive" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-100, 100, -100)
SmoothBivariateSpline(x, y, z, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
SmoothBivariateSpline(x, y, z, kx=10, ky=10)
assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in\
str(info.value)
with assert_raises(ValueError) as info:
SmoothBivariateSpline(x, y, z, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
with assert_raises(ValueError) as exc_info:
SmoothBivariateSpline(x, y, z, eps=0.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothBivariateSpline(x, y, z, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
y = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
z = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
w = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
bbox = np.array([1.0, 3.0, 1.0, 3.0])
# np.array input
spl1 = SmoothBivariateSpline(x, y, z, w=w, bbox=bbox, kx=1, ky=1)
# list input
spl2 = SmoothBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
bbox=bbox.tolist(), w=w.tolist(),
kx=1, ky=1)
assert_allclose(spl1(0.1, 0.5), spl2(0.1, 0.5))
class TestLSQSphereBivariateSpline:
def setup_method(self):
# define the input data and coordinates
ntheta, nphi = 70, 90
theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
knotdata = data[::5, ::5]
# calculate spline coefficients
lats, lons = meshgrid(theta, phi)
lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
self.lut_lsq = lut_lsq
self.data = knotdata
self.new_lons, self.new_lats = knotsp, knotst
def test_linear_constant(self):
assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
self.data)
def test_empty_input(self):
assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
def test_invalid_input(self):
ntheta, nphi = 70, 90
theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
ntheta) * pi
phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1), nphi) * 2. * pi
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
with assert_raises(ValueError) as exc_info:
invalid_theta = linspace(-0.1, 1.0, num=ntheta) * pi
invalid_lats, lons = meshgrid(invalid_theta, phi)
LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_theta = linspace(0.1, 1.1, num=ntheta) * pi
invalid_lats, lons = meshgrid(invalid_theta, phi)
LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = linspace(-0.1, 1.0, num=ntheta) * 2.0 * pi
lats, invalid_lons = meshgrid(theta, invalid_phi)
LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = linspace(0.0, 1.1, num=ntheta) * 2.0 * pi
lats, invalid_lons = meshgrid(theta, invalid_phi)
LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
data.T.ravel(), knotst, knotsp)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
lats, lons = meshgrid(theta, phi)
with assert_raises(ValueError) as exc_info:
invalid_knotst = np.copy(knotst)
invalid_knotst[0] = -0.1
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), invalid_knotst, knotsp)
assert "tt should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_knotst = np.copy(knotst)
invalid_knotst[0] = pi
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), invalid_knotst, knotsp)
assert "tt should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_knotsp = np.copy(knotsp)
invalid_knotsp[0] = -0.1
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, invalid_knotsp)
assert "tp should be between (0, 2pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_knotsp = np.copy(knotsp)
invalid_knotsp[0] = 2 * pi
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, invalid_knotsp)
assert "tp should be between (0, 2pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
knotst, knotsp, w=invalid_w)
assert "w should be positive" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
knotst, knotsp, eps=0.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
knotst, knotsp, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
ntheta, nphi = 70, 90
theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
ntheta) * pi
phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1),
nphi) * 2. * pi
lats, lons = meshgrid(theta, phi)
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
w = ones((lats.ravel().shape[0]))
# np.array input
spl1 = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp, w=w)
# list input
spl2 = LSQSphereBivariateSpline(lats.ravel().tolist(),
lons.ravel().tolist(),
data.T.ravel().tolist(),
knotst.tolist(),
knotsp.tolist(), w=w.tolist())
assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
class TestSmoothSphereBivariateSpline:
def setup_method(self):
theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
.75*pi, .75*pi])
phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
1.5 * pi])
r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
def test_linear_constant(self):
assert_almost_equal(self.lut.get_residual(), 0.)
assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
[[3, 3], [3, 3], [3, 3]])
def test_empty_input(self):
assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
def test_invalid_input(self):
theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi, .5 * pi,
.75 * pi, .75 * pi, .75 * pi])
phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
1.5 * pi])
r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
with assert_raises(ValueError) as exc_info:
invalid_theta = array([-0.1 * pi, .25 * pi, .25 * pi, .5 * pi,
.5 * pi, .5 * pi, .75 * pi, .75 * pi,
.75 * pi])
SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi,
.5 * pi, .5 * pi, .75 * pi, .75 * pi,
1.1 * pi])
SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
assert "theta should be between [0, pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = array([-.1 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
.5 * pi, pi, 1.5 * pi])
SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_phi = array([1.0 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
.5 * pi, pi, 2.1 * pi])
SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
assert "phi should be between [0, 2pi]" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
SmoothSphereBivariateSpline(theta, phi, r, w=invalid_w, s=1E10)
assert "w should be positive" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothSphereBivariateSpline(theta, phi, r, s=-1.0)
assert "s should be positive" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothSphereBivariateSpline(theta, phi, r, eps=-1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothSphereBivariateSpline(theta, phi, r, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
theta = np.array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi,
.5 * pi, .75 * pi, .75 * pi, .75 * pi])
phi = np.array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi,
pi, 1.5 * pi])
r = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
w = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# np.array input
spl1 = SmoothSphereBivariateSpline(theta, phi, r, w=w, s=1E10)
# list input
spl2 = SmoothSphereBivariateSpline(theta.tolist(), phi.tolist(),
r.tolist(), w=w.tolist(), s=1E10)
assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
class TestRectBivariateSpline:
def test_defaults(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
[0,0,-11,0,0],[0,0,4,0,0]])/6.
dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
[2,.25,0,-.25,-2],[4,-1,0,1,-4]])
dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
[-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1),dx)
assert_array_almost_equal(lut(x,y,dy=1),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
def test_derivatives(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([0,0,2./3,0,0])
dy = array([4,-1,0,-.25,-4])
dxdy = array([160,65,0,55,32])/24.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
def test_broadcast(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
def test_invalid_input(self):
with assert_raises(ValueError) as info:
x = array([6, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
RectBivariateSpline(x, y, z)
assert "x must be strictly increasing" in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([2, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
RectBivariateSpline(x, y, z)
assert "y must be strictly increasing" in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1]])
RectBivariateSpline(x, y, z)
assert "x dimension of z must have same number of elements as x"\
in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 3, 2],
[1, 2, 2, 2], [1, 2, 1, 2]])
RectBivariateSpline(x, y, z)
assert "y dimension of z must have same number of elements as y"\
in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
bbox = (-100, 100, -100)
RectBivariateSpline(x, y, z, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
RectBivariateSpline(x, y, z, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
def test_array_like_input(self):
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
bbox = array([1, 5, 1, 5])
spl1 = RectBivariateSpline(x, y, z, bbox=bbox)
spl2 = RectBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
bbox=bbox.tolist())
assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
def test_not_increasing_input(self):
# gh-8565
NSamp = 20
Theta = np.random.uniform(0, np.pi, NSamp)
Phi = np.random.uniform(0, 2 * np.pi, NSamp)
Data = np.ones(NSamp)
Interpolator = SmoothSphereBivariateSpline(Theta, Phi, Data, s=3.5)
NLon = 6
NLat = 3
GridPosLats = np.arange(NLat) / NLat * np.pi
GridPosLons = np.arange(NLon) / NLon * 2 * np.pi
# No error
Interpolator(GridPosLats, GridPosLons)
nonGridPosLats = GridPosLats.copy()
nonGridPosLats[2] = 0.001
with assert_raises(ValueError) as exc_info:
Interpolator(nonGridPosLats, GridPosLons)
assert "x must be strictly increasing" in str(exc_info.value)
nonGridPosLons = GridPosLons.copy()
nonGridPosLons[2] = 0.001
with assert_raises(ValueError) as exc_info:
Interpolator(GridPosLats, nonGridPosLons)
assert "y must be strictly increasing" in str(exc_info.value)
class TestRectSphereBivariateSpline:
def test_defaults(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def test_derivatives(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
assert_allclose(lut(x, y, dtheta=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def test_invalid_input(self):
data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
with assert_raises(ValueError) as exc_info:
lats = np.linspace(0, 170, 9) * np.pi / 180.
lons = np.linspace(0, 350, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "u should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 180, 9) * np.pi / 180.
lons = np.linspace(0, 350, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "u should be between (0, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 170, 9) * np.pi / 180.
lons = np.linspace(-181, 10, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 170, 9) * np.pi / 180.
lons = np.linspace(-10, 360, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data)
assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
lats = np.linspace(10, 170, 9) * np.pi / 180.
lons = np.linspace(10, 350, 18) * np.pi / 180.
RectSphereBivariateSpline(lats, lons, data, s=-1)
assert "s should be positive" in str(exc_info.value)
def test_array_like_input(self):
y = linspace(0.01, 2 * pi - 0.01, 7)
x = linspace(0.01, pi - 0.01, 7)
z = array([[1, 2, 1, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
[1, 2, 3, 2, 1, 2, 1],
[1, 2, 2, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
[1, 2, 2, 2, 1, 2, 1],
[1, 2, 1, 2, 1, 2, 1]])
# np.array input
spl1 = RectSphereBivariateSpline(x, y, z)
# list input
spl2 = RectSphereBivariateSpline(x.tolist(), y.tolist(), z.tolist())
assert_array_almost_equal(spl1(x, y), spl2(x, y))
def test_negative_evaluation(self):
lats = np.array([25, 30, 35, 40, 45])
lons = np.array([-90, -85, -80, -75, 70])
mesh = np.meshgrid(lats, lons)
data = mesh[0] + mesh[1] # lon + lat value
lat_r = np.radians(lats)
lon_r = np.radians(lons)
interpolator = RectSphereBivariateSpline(lat_r, lon_r, data)
query_lat = np.radians(np.array([35, 37.5]))
query_lon = np.radians(np.array([-80, -77.5]))
data_interp = interpolator(query_lat, query_lon)
ans = np.array([[-45.0, -42.480862],
[-49.0625, -46.54315]])
assert_array_almost_equal(data_interp, ans)
def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
if dx == 0 and dy == 0:
return func(x, y)
elif dx == 1 and dy == 0:
return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
elif dx == 0 and dy == 1:
return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
elif dx == 1 and dy == 1:
return (func(x + eps, y + eps) - func(x - eps, y + eps)
- func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
else:
raise ValueError("invalid derivative order")
|
MTASZTAKI/ApertusVR
|
refs/heads/0.9
|
plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/deps/v8/tools/turbolizer-perf.py
|
12
|
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import json
import re
import argparse
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
def trace_begin():
json_obj['eventCounts'] = {}
prog = re.compile(r'0x[0-9a-fA-F]+')
for phase in reversed(json_obj['phases']):
if phase['name'] == "disassembly":
for line in phase['data'].splitlines():
result = re.match(prog, line)
if result:
known_addrs.add(result.group(0))
def trace_end():
print json.dumps(json_obj)
def process_event(param_dict):
addr = "0x%x" % int(param_dict['sample']['ip'])
# Only count samples that belong to the function
if addr not in known_addrs:
return
ev_name = param_dict['ev_name']
if ev_name not in json_obj['eventCounts']:
json_obj['eventCounts'][ev_name] = {}
if addr not in json_obj['eventCounts'][ev_name]:
json_obj['eventCounts'][ev_name][addr] = 0
json_obj['eventCounts'][ev_name][addr] += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Perf script to merge profiling data with turbofan compiler "
"traces.")
parser.add_argument("file_name", metavar="JSON File",
help="turbo trace json file.")
args = parser.parse_args()
with open(args.file_name, 'r') as json_file:
json_obj = json.load(json_file)
known_addrs = set()
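# Example invocation (paths are illustrative): this script is intended to be
# driven by `perf script`, which sets PERF_EXEC_PATH and calls trace_begin,
# process_event and trace_end; extra arguments after the script name are
# handed to the argparse block above, e.g.
#
#   perf script -i perf.data -s turbolizer-perf.py turbo-main.json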
|
CallaJun/hackprince
|
refs/heads/master
|
indico/nose/loader.py
|
5
|
"""
Test Loader
-----------
nose's test loader implements the same basic functionality as its
superclass, unittest.TestLoader, but extends it with more liberal
interpretations of what may be a test and how a test may be named.
"""
from __future__ import generators
import logging
import os
import sys
import unittest
import types
from inspect import isfunction
from nose.pyversion import unbound_method, ismethod
from nose.case import FunctionTestCase, MethodTestCase
from nose.failure import Failure
from nose.config import Config
from nose.importer import Importer, add_path, remove_path
from nose.selector import defaultSelector, TestAddress
from nose.util import func_lineno, getpackage, isclass, isgenerator, \
ispackage, regex_last_key, resolve_name, transplant_func, \
transplant_class, test_address
from nose.suite import ContextSuiteFactory, ContextList, LazySuite
from nose.pyversion import sort_list, cmp_to_key
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
# for efficiency and easier mocking
op_normpath = os.path.normpath
op_abspath = os.path.abspath
op_join = os.path.join
op_isdir = os.path.isdir
op_isfile = os.path.isfile
__all__ = ['TestLoader', 'defaultTestLoader']
class TestLoader(unittest.TestLoader):
"""Test loader that extends unittest.TestLoader to:
* Load tests from test-like functions and classes that are not
unittest.TestCase subclasses
* Find and load test modules in a directory
* Support tests that are generators
* Support easy extensions of or changes to that behavior through plugins
"""
config = None
importer = None
workingDir = None
selector = None
suiteClass = None
def __init__(self, config=None, importer=None, workingDir=None,
selector=None):
"""Initialize a test loader.
Parameters (all optional):
* config: provide a `nose.config.Config`_ or other config class
instance; if not provided a `nose.config.Config`_ with
default values is used.
* importer: provide an importer instance that implements
`importFromPath`. If not provided, a
`nose.importer.Importer`_ is used.
* workingDir: the directory to which file and module names are
relative. If not provided, assumed to be the current working
directory.
* selector: a selector class or instance. If a class is
provided, it will be instantiated with one argument, the
current config. If not provided, a `nose.selector.Selector`_
is used.
"""
if config is None:
config = Config()
if importer is None:
importer = Importer(config=config)
if workingDir is None:
workingDir = config.workingDir
if selector is None:
selector = defaultSelector(config)
elif isclass(selector):
selector = selector(config)
self.config = config
self.importer = importer
self.workingDir = op_normpath(op_abspath(workingDir))
self.selector = selector
if config.addPaths:
add_path(workingDir, config)
self.suiteClass = ContextSuiteFactory(config=config)
self._visitedPaths = set([])
unittest.TestLoader.__init__(self)
def getTestCaseNames(self, testCaseClass):
"""Override to select with selector, unless
config.getTestCaseNamesCompat is True
"""
if self.config.getTestCaseNamesCompat:
return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
def wanted(attr, cls=testCaseClass, sel=self.selector):
item = getattr(cls, attr, None)
if isfunction(item):
item = unbound_method(cls, item)
elif not ismethod(item):
return False
return sel.wantMethod(item)
cases = filter(wanted, dir(testCaseClass))
# add runTest if nothing else picked
if not cases and hasattr(testCaseClass, 'runTest'):
cases = ['runTest']
if self.sortTestMethodsUsing:
sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
return cases
def _haveVisited(self, path):
# For cases where path is None, we always pretend we haven't visited
# them.
if path is None:
return False
return path in self._visitedPaths
def _addVisitedPath(self, path):
if path is not None:
self._visitedPaths.add(path)
def loadTestsFromDir(self, path):
"""Load tests from the directory at path. This is a generator
-- each suite of tests from a module or other file is yielded
and is expected to be executed before the next file is
examined.
"""
log.debug("load from dir %s", path)
plugins = self.config.plugins
plugins.beforeDirectory(path)
if self.config.addPaths:
paths_added = add_path(path, self.config)
entries = os.listdir(path)
sort_list(entries, regex_last_key(self.config.testMatch))
for entry in entries:
# this hard-coded initial-dot test will be removed:
# http://code.google.com/p/python-nose/issues/detail?id=82
if entry.startswith('.'):
continue
entry_path = op_abspath(op_join(path, entry))
is_file = op_isfile(entry_path)
wanted = False
if is_file:
is_dir = False
wanted = self.selector.wantFile(entry_path)
else:
is_dir = op_isdir(entry_path)
if is_dir:
# this hard-coded initial-underscore test will be removed:
# http://code.google.com/p/python-nose/issues/detail?id=82
if entry.startswith('_'):
continue
wanted = self.selector.wantDirectory(entry_path)
is_package = ispackage(entry_path)
            # Python 3.3 now implements PEP 420: Implicit Namespace Packages.
            # As a result, it's now possible that parent paths that have a
            # segment with the same basename as our package end up
            # in module.__path__. So we have to keep track of what we've
            # visited, and not revisit them.
if wanted and not self._haveVisited(entry_path):
self._addVisitedPath(entry_path)
if is_file:
plugins.beforeContext()
if entry.endswith('.py'):
yield self.loadTestsFromName(
entry_path, discovered=True)
else:
yield self.loadTestsFromFile(entry_path)
plugins.afterContext()
elif is_package:
# Load the entry as a package: given the full path,
# loadTestsFromName() will figure it out
yield self.loadTestsFromName(
entry_path, discovered=True)
else:
# Another test dir in this one: recurse lazily
yield self.suiteClass(
lambda: self.loadTestsFromDir(entry_path))
tests = []
for test in plugins.loadTestsFromDir(path):
tests.append(test)
# TODO: is this try/except needed?
try:
if tests:
yield self.suiteClass(tests)
except (KeyboardInterrupt, SystemExit):
raise
except:
yield self.suiteClass([Failure(*sys.exc_info())])
# pop paths
if self.config.addPaths:
for p in paths_added:
remove_path(p)
plugins.afterDirectory(path)
def loadTestsFromFile(self, filename):
"""Load tests from a non-module file. Default is to raise a
ValueError; plugins may implement `loadTestsFromFile` to
provide a list of tests loaded from the file.
"""
log.debug("Load from non-module file %s", filename)
try:
tests = [test for test in
self.config.plugins.loadTestsFromFile(filename)]
if tests:
# Plugins can yield False to indicate that they were
# unable to load tests from a file, but it was not an
# error -- the file just had no tests to load.
tests = filter(None, tests)
return self.suiteClass(tests)
else:
# Nothing was able to even try to load from this file
open(filename, 'r').close() # trigger os error
raise ValueError("Unable to load tests from file %s"
% filename)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
return self.suiteClass(
[Failure(exc[0], exc[1], exc[2],
address=(filename, None, None))])
def loadTestsFromGenerator(self, generator, module):
"""Lazy-load tests from a generator function. The generator function
may yield either:
* a callable, or
* a function name resolvable within the same module
"""
def generate(g=generator, m=module):
try:
for test in g():
test_func, arg = self.parseGeneratedTest(test)
if not callable(test_func):
test_func = getattr(m, test_func)
yield FunctionTestCase(test_func, arg=arg, descriptor=g)
except KeyboardInterrupt:
raise
except:
exc = sys.exc_info()
yield Failure(exc[0], exc[1], exc[2],
address=test_address(generator))
return self.suiteClass(generate, context=generator, can_split=False)
def loadTestsFromGeneratorMethod(self, generator, cls):
"""Lazy-load tests from a generator method.
This is more complicated than loading from a generator function,
since a generator method may yield:
* a function
* a bound or unbound method, or
* a method name
"""
# convert the unbound generator method
# into a bound method so it can be called below
if hasattr(generator, 'im_class'):
cls = generator.im_class
inst = cls()
method = generator.__name__
generator = getattr(inst, method)
def generate(g=generator, c=cls):
try:
for test in g():
test_func, arg = self.parseGeneratedTest(test)
if not callable(test_func):
test_func = unbound_method(c, getattr(c, test_func))
if ismethod(test_func):
yield MethodTestCase(test_func, arg=arg, descriptor=g)
elif callable(test_func):
# In this case we're forcing the 'MethodTestCase'
# to run the inline function as its test call,
# but using the generator method as the 'method of
# record' (so no need to pass it as the descriptor)
yield MethodTestCase(g, test=test_func, arg=arg)
else:
yield Failure(
TypeError,
"%s is not a callable or method" % test_func)
except KeyboardInterrupt:
raise
except:
exc = sys.exc_info()
yield Failure(exc[0], exc[1], exc[2],
address=test_address(generator))
return self.suiteClass(generate, context=generator, can_split=False)
def loadTestsFromModule(self, module, path=None, discovered=False):
"""Load all tests from module and return a suite containing
them. If the module has been discovered and is not test-like,
the suite will be empty by default, though plugins may add
their own tests.
"""
log.debug("Load from module %s", module)
tests = []
test_classes = []
test_funcs = []
# For *discovered* modules, we only load tests when the module looks
# testlike. For modules we've been directed to load, we always
# look for tests. (discovered is set to True by loadTestsFromDir)
if not discovered or self.selector.wantModule(module):
for item in dir(module):
test = getattr(module, item, None)
# print "Check %s (%s) in %s" % (item, test, module.__name__)
if isclass(test):
if self.selector.wantClass(test):
test_classes.append(test)
elif isfunction(test) and self.selector.wantFunction(test):
test_funcs.append(test)
sort_list(test_classes, lambda x: x.__name__)
sort_list(test_funcs, func_lineno)
tests = map(lambda t: self.makeTest(t, parent=module),
test_classes + test_funcs)
# Now, descend into packages
# FIXME can or should this be lazy?
# is this syntax 2.2 compatible?
module_paths = getattr(module, '__path__', [])
if path:
path = os.path.normcase(os.path.realpath(path))
for module_path in module_paths:
log.debug('os.path.normcase(%r): %r', module_path,
os.path.normcase(module_path))
module_path = os.path.normcase(module_path)
log.debug("Load tests from module path %s?", module_path)
log.debug("path: %s os.path.realpath(%s): %s",
path, module_path, os.path.realpath(module_path))
if (self.config.traverseNamespace or not path) or \
os.path.realpath(module_path).startswith(path):
# Egg files can be on sys.path, so make sure the path is a
# directory before trying to load from it.
if os.path.isdir(module_path):
tests.extend(self.loadTestsFromDir(module_path))
for test in self.config.plugins.loadTestsFromModule(module, path):
tests.append(test)
return self.suiteClass(ContextList(tests, context=module))
def loadTestsFromName(self, name, module=None, discovered=False):
"""Load tests from the entity with the given name.
The name may indicate a file, directory, module, or any object
within a module. See `nose.util.split_test_name` for details on
test name parsing.
"""
# FIXME refactor this method into little bites?
log.debug("load from %s (%s)", name, module)
suite = self.suiteClass
# give plugins first crack
plug_tests = self.config.plugins.loadTestsFromName(name, module)
if plug_tests:
return suite(plug_tests)
addr = TestAddress(name, workingDir=self.workingDir)
if module:
# Two cases:
# name is class.foo
# The addr will be incorrect, since it thinks class.foo is
# a dotted module name. It's actually a dotted attribute
# name. In this case we want to use the full submitted
# name as the name to load from the module.
# name is module:class.foo
# The addr will be correct. The part we want is the part after
# the :, which is in addr.call.
if addr.call:
name = addr.call
parent, obj = self.resolve(name, module)
if (isclass(parent)
and getattr(parent, '__module__', None) != module.__name__
and not isinstance(obj, Failure)):
parent = transplant_class(parent, module.__name__)
obj = getattr(parent, obj.__name__)
log.debug("parent %s obj %s module %s", parent, obj, module)
if isinstance(obj, Failure):
return suite([obj])
else:
return suite(ContextList([self.makeTest(obj, parent)],
context=parent))
else:
if addr.module:
try:
if addr.filename is None:
module = resolve_name(addr.module)
else:
self.config.plugins.beforeImport(
addr.filename, addr.module)
# FIXME: to support module.name names,
# do what resolve-name does and keep trying to
# import, popping tail of module into addr.call,
# until we either get an import or run out of
# module parts
try:
module = self.importer.importFromPath(
addr.filename, addr.module)
finally:
self.config.plugins.afterImport(
addr.filename, addr.module)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
return suite([Failure(exc[0], exc[1], exc[2],
address=addr.totuple())])
if addr.call:
return self.loadTestsFromName(addr.call, module)
else:
return self.loadTestsFromModule(
module, addr.filename,
discovered=discovered)
elif addr.filename:
path = addr.filename
if addr.call:
package = getpackage(path)
if package is None:
return suite([
Failure(ValueError,
"Can't find callable %s in file %s: "
"file is not a python module" %
(addr.call, path),
address=addr.totuple())])
return self.loadTestsFromName(addr.call, module=package)
else:
if op_isdir(path):
# In this case we *can* be lazy since we know
# that each module in the dir will be fully
# loaded before its tests are executed; we
# also know that we're not going to be asked
# to load from . and ./some_module.py *as part
# of this named test load*
return LazySuite(
lambda: self.loadTestsFromDir(path))
elif op_isfile(path):
return self.loadTestsFromFile(path)
else:
return suite([
Failure(OSError, "No such file %s" % path,
address=addr.totuple())])
else:
# just a function? what to do? I think it can only be
# handled when module is not None
return suite([
Failure(ValueError, "Unresolvable test name %s" % name,
address=addr.totuple())])
def loadTestsFromNames(self, names, module=None):
"""Load tests from all names, returning a suite containing all
tests.
"""
plug_res = self.config.plugins.loadTestsFromNames(names, module)
if plug_res:
suite, names = plug_res
if suite:
return self.suiteClass([
self.suiteClass(suite),
unittest.TestLoader.loadTestsFromNames(self, names, module)
])
return unittest.TestLoader.loadTestsFromNames(self, names, module)
def loadTestsFromTestCase(self, testCaseClass):
"""Load tests from a unittest.TestCase subclass.
"""
cases = []
plugins = self.config.plugins
for case in plugins.loadTestsFromTestCase(testCaseClass):
cases.append(case)
# For efficiency in the most common case, just call and return from
# super. This avoids having to extract cases and rebuild a context
# suite when there are no plugin-contributed cases.
if not cases:
return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
cases.extend(
[case for case in
super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
return self.suiteClass(cases)
def loadTestsFromTestClass(self, cls):
"""Load tests from a test class that is *not* a unittest.TestCase
subclass.
In this case, we can't depend on the class's `__init__` taking method
name arguments, so we have to compose a MethodTestCase for each
method in the class that looks testlike.
"""
def wanted(attr, cls=cls, sel=self.selector):
item = getattr(cls, attr, None)
if isfunction(item):
item = unbound_method(cls, item)
elif not ismethod(item):
return False
return sel.wantMethod(item)
cases = [self.makeTest(getattr(cls, case), cls)
for case in filter(wanted, dir(cls))]
for test in self.config.plugins.loadTestsFromTestClass(cls):
cases.append(test)
return self.suiteClass(ContextList(cases, context=cls))
def makeTest(self, obj, parent=None):
try:
return self._makeTest(obj, parent)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
try:
addr = test_address(obj)
except KeyboardInterrupt:
raise
except:
addr = None
return Failure(exc[0], exc[1], exc[2], address=addr)
def _makeTest(self, obj, parent=None):
"""Given a test object and its parent, return a test case
or test suite.
"""
plug_tests = []
try:
addr = test_address(obj)
except KeyboardInterrupt:
raise
except:
addr = None
for test in self.config.plugins.makeTest(obj, parent):
plug_tests.append(test)
# TODO: is this try/except needed?
try:
if plug_tests:
return self.suiteClass(plug_tests)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
return Failure(exc[0], exc[1], exc[2], address=addr)
if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
# This is a Python 3.x 'unbound method'. Wrap it with its
# associated class..
obj = unbound_method(parent, obj)
if isinstance(obj, unittest.TestCase):
return obj
elif isclass(obj):
if parent and obj.__module__ != parent.__name__:
obj = transplant_class(obj, parent.__name__)
if issubclass(obj, unittest.TestCase):
return self.loadTestsFromTestCase(obj)
else:
return self.loadTestsFromTestClass(obj)
elif ismethod(obj):
if parent is None:
parent = obj.__class__
if issubclass(parent, unittest.TestCase):
return parent(obj.__name__)
else:
if isgenerator(obj):
return self.loadTestsFromGeneratorMethod(obj, parent)
else:
return MethodTestCase(obj)
elif isfunction(obj):
if parent and obj.__module__ != parent.__name__:
obj = transplant_func(obj, parent.__name__)
if isgenerator(obj):
return self.loadTestsFromGenerator(obj, parent)
else:
return FunctionTestCase(obj)
else:
return Failure(TypeError,
"Can't make a test from %s" % obj,
address=addr)
def resolve(self, name, module):
"""Resolve name within module
"""
obj = module
parts = name.split('.')
for part in parts:
parent, obj = obj, getattr(obj, part, None)
if obj is None:
# no such test
obj = Failure(ValueError, "No such test %s" % name)
return parent, obj
def parseGeneratedTest(self, test):
"""Given the yield value of a test generator, return a func and args.
This is used in the two loadTestsFromGenerator* methods.
"""
if not isinstance(test, tuple): # yield test
test_func, arg = (test, tuple())
elif len(test) == 1: # yield (test,)
test_func, arg = (test[0], tuple())
else: # yield test, foo, bar, ...
assert len(test) > 1 # sanity check
test_func, arg = (test[0], test[1:])
return test_func, arg
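# For reference, parseGeneratedTest above maps the three supported yield
# shapes as follows (derived from the code, shown for illustration):
#
#   loader.parseGeneratedTest(func)          -> (func, ())
#   loader.parseGeneratedTest((func,))       -> (func, ())
#   loader.parseGeneratedTest((func, 1, 2))  -> (func, (1, 2))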
defaultTestLoader = TestLoader
|
postla/OpenNFR-E2
|
refs/heads/master
|
lib/python/Screens/PowerTimerEntry.py
|
3
|
from Screens.Screen import Screen
from Components.config import ConfigSelection, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, ConfigInteger, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.Button import Button
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.SystemInfo import SystemInfo
from PowerTimer import AFTEREVENT, TIMERTYPE
from time import localtime, mktime, time, strftime
from datetime import datetime
class TimerEntry(Screen, ConfigListScreen):
def __init__(self, session, timer):
Screen.__init__(self, session)
self.timer = timer
self.entryDate = None
self.entryService = None
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig()
self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions"],
{
"ok": self.keySelect,
"save": self.keyGo,
"cancel": self.keyCancel,
"volumeUp": self.incrementStart,
"volumeDown": self.decrementStart,
"size+": self.incrementEnd,
"size-": self.decrementEnd
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session = session)
self.setTitle(_("PowerManager entry"))
self.createSetup("config")
def createConfig(self):
afterevent = {
AFTEREVENT.NONE: "nothing",
AFTEREVENT.WAKEUPTOSTANDBY: "wakeuptostandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby"
}[self.timer.afterEvent]
timertype = {
TIMERTYPE.WAKEUP: "wakeup",
TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
TIMERTYPE.AUTOSTANDBY: "autostandby",
TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
TIMERTYPE.STANDBY: "standby",
TIMERTYPE.DEEPSTANDBY: "deepstandby",
TIMERTYPE.REBOOT: "reboot",
TIMERTYPE.RESTART: "restart"
}[self.timer.timerType]
weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
# calculate default values
day = []
weekday = 0
for x in (0, 1, 2, 3, 4, 5, 6):
day.append(0)
if self.timer.repeated: # repeated
type = "repeated"
if self.timer.repeated == 31: # Mon-Fri
repeated = "weekdays"
elif self.timer.repeated == 127: # daily
repeated = "daily"
else:
flags = self.timer.repeated
repeated = "user"
count = 0
for x in (0, 1, 2, 3, 4, 5, 6):
if flags == 1: # weekly
print "Set to weekday " + str(x)
weekday = x
if flags & 1 == 1: # set user defined flags
day[x] = 1
count += 1
else:
day[x] = 0
flags >>= 1
if count == 1:
repeated = "weekly"
else: # once
type = "once"
repeated = None
weekday = int(strftime("%u", localtime(self.timer.begin))) - 1
day[weekday] = 1
autosleepinstandbyonly = self.timer.autosleepinstandbyonly
autosleepdelay = self.timer.autosleepdelay
autosleeprepeat = self.timer.autosleeprepeat
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.timerentry_timertype = ConfigSelection(choices = [("wakeup", _("wakeup")),("wakeuptostandby", _("wakeup to standby")), ("autostandby", _("auto standby")), ("autodeepstandby", _("auto deepstandby")), ("standby", _("go to standby")), ("deepstandby", shutdownString), ("reboot", _("reboot system")), ("restart", _("restart GUI"))], default = timertype)
		self.timerentry_afterevent = ConfigSelection(choices = [("nothing", _("do nothing")), ("wakeuptostandby", _("wakeup to standby")), ("standby", _("go to standby")), ("deepstandby", shutdownString)], default = afterevent)
self.timerentry_type = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = type)
self.timerentry_repeated = ConfigSelection(default = repeated, choices = [("daily", _("daily")), ("weekly", _("weekly")), ("weekdays", _("Mon-Fri")), ("user", _("user defined"))])
		self.timerentry_autosleepdelay = ConfigInteger(default=autosleepdelay, limits = (10, 300))
self.timerentry_autosleeprepeat = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = autosleeprepeat)
		self.timerentry_autosleepinstandbyonly = ConfigSelection(choices = [("yes",_("Yes")), ("no", _("No"))],default=autosleepinstandbyonly)
self.timerentry_date = ConfigDateTime(default = self.timer.begin, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_starttime = ConfigClock(default = self.timer.begin)
self.timerentry_endtime = ConfigClock(default = self.timer.end)
self.timerentry_showendtime = ConfigSelection(default = (((self.timer.end - self.timer.begin) /60 ) > 1), choices = [(True, _("yes")), (False, _("no"))])
self.timerentry_repeatedbegindate = ConfigDateTime(default = self.timer.repeatedbegindate, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_weekday = ConfigSelection(default = weekday_table[weekday], choices = [("mon",_("Monday")), ("tue", _("Tuesday")), ("wed",_("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])
self.timerentry_day = ConfigSubList()
for x in (0, 1, 2, 3, 4, 5, 6):
self.timerentry_day.append(ConfigYesNo(default = day[x]))
def createSetup(self, widget):
self.list = []
self.timerType = getConfigListEntry(_("Timer type"), self.timerentry_timertype)
self.list.append(self.timerType)
if self.timerentry_timertype.getValue() == "autostandby" or self.timerentry_timertype.getValue() == "autodeepstandby":
if self.timerentry_timertype.getValue() == "autodeepstandby":
self.list.append(getConfigListEntry(_("Only active when in standby"), self.timerrntry_autosleepinstandbyonly))
self.list.append(getConfigListEntry(_("Sleep delay"), self.timerrntry_autosleepdelay))
self.list.append(getConfigListEntry(_("Repeat type"), self.timerentry_autosleeprepeat))
self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type)
self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime)
self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated)
else:
self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type)
self.list.append(self.timerTypeEntry)
if self.timerentry_type.getValue() == "once":
self.frequencyEntry = None
else: # repeated
self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated)
self.list.append(self.frequencyEntry)
self.repeatedbegindateEntry = getConfigListEntry(_("Starting on"), self.timerentry_repeatedbegindate)
self.list.append(self.repeatedbegindateEntry)
if self.timerentry_repeated.getValue() == "daily":
pass
if self.timerentry_repeated.getValue() == "weekdays":
pass
if self.timerentry_repeated.getValue() == "weekly":
self.list.append(getConfigListEntry(_("Weekday"), self.timerentry_weekday))
if self.timerentry_repeated.getValue() == "user":
self.list.append(getConfigListEntry(_("Monday"), self.timerentry_day[0]))
self.list.append(getConfigListEntry(_("Tuesday"), self.timerentry_day[1]))
self.list.append(getConfigListEntry(_("Wednesday"), self.timerentry_day[2]))
self.list.append(getConfigListEntry(_("Thursday"), self.timerentry_day[3]))
self.list.append(getConfigListEntry(_("Friday"), self.timerentry_day[4]))
self.list.append(getConfigListEntry(_("Saturday"), self.timerentry_day[5]))
self.list.append(getConfigListEntry(_("Sunday"), self.timerentry_day[6]))
self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date)
if self.timerentry_type.getValue() == "once":
self.list.append(self.entryDate)
self.entryStartTime = getConfigListEntry(_("Start time"), self.timerentry_starttime)
self.list.append(self.entryStartTime)
self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime)
self.list.append(self.entryShowEndTime)
self.entryEndTime = getConfigListEntry(_("End time"), self.timerentry_endtime)
if self.timerentry_showendtime.getValue():
self.list.append(self.entryEndTime)
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent))
self[widget].list = self.list
self[widget].l.setList(self.list)
def newConfig(self):
if self["config"].getCurrent() in (self.timerType, self.timerTypeEntry, self.frequencyEntry, self.entryShowEndTime):
self.createSetup("config")
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def keySelect(self):
cur = self["config"].getCurrent()
self.keyGo()
def getTimestamp(self, date, mytime):
d = localtime(date)
dt = datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(mktime(dt.timetuple()))
def getBeginEnd(self):
date = self.timerentry_date.getValue()
endtime = self.timerentry_endtime.getValue()
starttime = self.timerentry_starttime.getValue()
begin = self.getTimestamp(date, starttime)
end = self.getTimestamp(date, endtime)
# if the endtime is less than the starttime, add 1 day.
if end < begin:
end += 86400
return begin, end
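	# Worked example (illustrative): a timer configured from 23:30 to 00:15
	# produces end < begin above, so getBeginEnd() shifts the end time by
	# 86400 seconds onto the following day.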
def keyGo(self, result = None):
if not self.timerentry_showendtime.getValue():
self.timerentry_endtime.value = self.timerentry_starttime.getValue()
self.timer.resetRepeated()
self.timer.timerType = {
"wakeup": TIMERTYPE.WAKEUP,
"wakeuptostandby": TIMERTYPE.WAKEUPTOSTANDBY,
"autostandby": TIMERTYPE.AUTOSTANDBY,
"autodeepstandby": TIMERTYPE.AUTODEEPSTANDBY,
"standby": TIMERTYPE.STANDBY,
"deepstandby": TIMERTYPE.DEEPSTANDBY,
"reboot": TIMERTYPE.REBOOT,
"restart": TIMERTYPE.RESTART
}[self.timerentry_timertype.value]
self.timer.afterEvent = {
"nothing": AFTEREVENT.NONE,
"wakeuptostandby": AFTEREVENT.WAKEUPTOSTANDBY,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY
}[self.timerentry_afterevent.value]
if self.timerentry_type.getValue() == "once":
self.timer.begin, self.timer.end = self.getBeginEnd()
if self.timerentry_timertype.getValue() == "autostandby" or self.timerentry_timertype.getValue() == "autodeepstandby":
self.timer.begin = int(time()) + 10
self.timer.end = self.timer.begin
			self.timer.autosleepinstandbyonly = self.timerentry_autosleepinstandbyonly.getValue()
			self.timer.autosleepdelay = self.timerentry_autosleepdelay.getValue()
self.timer.autosleeprepeat = self.timerentry_autosleeprepeat.getValue()
if self.timerentry_type.getValue() == "repeated":
if self.timerentry_repeated.getValue() == "daily":
for x in (0, 1, 2, 3, 4, 5, 6):
self.timer.setRepeated(x)
if self.timerentry_repeated.getValue() == "weekly":
self.timer.setRepeated(self.timerentry_weekday.index)
if self.timerentry_repeated.getValue() == "weekdays":
for x in (0, 1, 2, 3, 4):
self.timer.setRepeated(x)
if self.timerentry_repeated.getValue() == "user":
for x in (0, 1, 2, 3, 4, 5, 6):
if self.timerentry_day[x].getValue():
self.timer.setRepeated(x)
self.timer.repeatedbegindate = self.getTimestamp(self.timerentry_repeatedbegindate.getValue(), self.timerentry_starttime.getValue())
if self.timer.repeated:
self.timer.begin = self.getTimestamp(self.timerentry_repeatedbegindate.getValue(), self.timerentry_starttime.getValue())
self.timer.end = self.getTimestamp(self.timerentry_repeatedbegindate.getValue(), self.timerentry_endtime.getValue())
else:
				# 'time' is the function imported from the time module, so it is
				# called directly (time.time() would raise AttributeError here)
				self.timer.begin = self.getTimestamp(time(), self.timerentry_starttime.getValue())
				self.timer.end = self.getTimestamp(time(), self.timerentry_endtime.getValue())
# when a timer end is set before the start, add 1 day
if self.timer.end < self.timer.begin:
self.timer.end += 86400
self.saveTimer()
self.close((True, self.timer))
def incrementStart(self):
self.timerentry_starttime.increment()
self["config"].invalidate(self.entryStartTime)
if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [0, 0]:
self.timerentry_date.value += 86400
self["config"].invalidate(self.entryDate)
def decrementStart(self):
self.timerentry_starttime.decrement()
self["config"].invalidate(self.entryStartTime)
if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [23, 59]:
self.timerentry_date.value -= 86400
self["config"].invalidate(self.entryDate)
def incrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.increment()
self["config"].invalidate(self.entryEndTime)
def decrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.decrement()
self["config"].invalidate(self.entryEndTime)
def saveTimer(self):
self.session.nav.PowerTimer.saveTimer()
def keyCancel(self):
self.close((False,))
class TimerLog(Screen):
def __init__(self, session, timer):
Screen.__init__(self, session)
self.skinName = "TimerLog"
self.timer = timer
self.log_entries = self.timer.log_entries[:]
self.fillLogList()
self["loglist"] = MenuList(self.list)
self["logentry"] = Label()
self["key_red"] = Button(_("Delete entry"))
self["key_green"] = Button()
self["key_yellow"] = Button("")
self["key_blue"] = Button(_("Clear log"))
self.onShown.append(self.updateText)
self["actions"] = NumberActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
{
"ok": self.keyClose,
"cancel": self.keyClose,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"red": self.deleteEntry,
"blue": self.clearLog
}, -1)
self.setTitle(_("PowerManager log"))
def deleteEntry(self):
cur = self["loglist"].getCurrent()
if cur is None:
return
self.log_entries.remove(cur[1])
self.fillLogList()
self["loglist"].l.setList(self.list)
self.updateText()
def fillLogList(self):
self.list = [(str(strftime("%Y-%m-%d %H-%M", localtime(x[0])) + " - " + x[2]), x) for x in self.log_entries]
def clearLog(self):
self.log_entries = []
self.fillLogList()
self["loglist"].l.setList(self.list)
self.updateText()
def keyClose(self):
if self.timer.log_entries != self.log_entries:
self.timer.log_entries = self.log_entries
self.close((True, self.timer))
else:
self.close((False,))
def up(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.moveUp)
self.updateText()
def down(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.moveDown)
self.updateText()
def left(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.pageUp)
self.updateText()
def right(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.pageDown)
self.updateText()
def updateText(self):
if self.list:
self["logentry"].setText(str(self["loglist"].getCurrent()[1][2]))
else:
self["logentry"].setText("")
|
jasonseminara/OpenSourceFinal
|
refs/heads/master
|
myvenv/lib/python3.5/site-packages/django/core/cache/backends/locmem.py
|
586
|
"Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
"""A context manager that does nothing special."""
yield
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
if self._has_expired(key):
self._set(key, pickled, timeout)
return True
return False
def get(self, key, default=None, version=None, acquire_lock=True):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = None
with (self._lock.reader() if acquire_lock else dummy()):
if not self._has_expired(key):
pickled = self._cache[key]
if pickled is not None:
try:
return pickle.loads(pickled)
except pickle.PickleError:
return default
with (self._lock.writer() if acquire_lock else dummy()):
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
if len(self._cache) >= self._max_entries:
self._cull()
self._cache[key] = value
self._expire_info[key] = self.get_backend_timeout(timeout)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
self._set(key, pickled, timeout)
def incr(self, key, delta=1, version=None):
with self._lock.writer():
value = self.get(key, version=version, acquire_lock=False)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = pickled
return new_value
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.reader():
if not self._has_expired(key):
return True
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
def _has_expired(self, key):
exp = self._expire_info.get(key, -1)
if exp is None or exp > time.time():
return False
return True
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._delete(key)
def clear(self):
self._cache.clear()
self._expire_info.clear()
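# Minimal usage sketch (illustrative; a real project would normally obtain
# the cache via django.core.cache.caches rather than instantiating the
# backend directly):
#
#   cache = LocMemCache('example', {})
#   cache.set('answer', 42, timeout=30)
#   assert cache.get('answer') == 42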
|
psywhale/pyswitchvox
|
refs/heads/master
|
bin/elevator_check.py
|
1
|
#!/usr/bin/env python
import sys
import os
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir)))
from check_outboundCall import ElevatorCall as check
def main():
check().run_nagios_check()
if __name__=="__main__":
main()
|
sysadminmatmoz/odoo-clearcorp
|
refs/heads/8.0
|
TODO-8.0/account_voucher_prepayment/wizard/account_voucher_add_payments.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class accountVoucheraddPaymentwizard (osv.osv_memory):
_name = "account.voucher.add.payments"
_description = "Account Voucher Add Payments Wizard"
    #===========================================================================
    # partner_id and account_id come from the main window. Their default
    # values are set from the main window and passed to the wizard.
    # They are also used in the xml view to build the main domain.
    #===========================================================================
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner'),
        'account_id': fields.many2one('account.account', 'Account'),  # used to apply the domain in the xml view.
'move_lines': fields.many2many('account.move.line', 'move_lines_prepayment', required=True, string="Move Lines"),
}
def action_reconcile(self, cr, uid, ids, context=None):
        # get_object_reference returns (module_name, view_id)
dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'view_account_move_line_reconcile_full')
#Get move_lines from invoice
invoice_move_lines_ids = context['invoice_move_lines']
#Get move_lines from wizard
wizard_move_lines = self.read(cr, uid, ids, ['move_lines'], context=context)[0] #return a dictionary
wizard_move_lines_ids = wizard_move_lines['move_lines']
#Put all ids together
all_ids = wizard_move_lines_ids + invoice_move_lines_ids
#Update context
context.update({'active_ids': all_ids})
return {
'name':_("Reconcile"),
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'account.move.line.reconcile',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': "[]",
'context':context,
}
|
eltonsantos/django
|
refs/heads/master
|
tests/utils_tests/test_no_submodule.py
|
737
|
# Used to test for modules which don't have submodules.
|
nandhp/misc-utils
|
refs/heads/master
|
keepassx_domains.py
|
1
|
#!/usr/bin/env python2
#
# Extract list of sites from a KeepassX CSV export. Output is
# formatted for grepping in a list of domains. Written for Cloudbleed.
#
# This script makes some assumptions on how your Title and URL fields
# are filled out, but if one or both is a URL or hostname, it should
# work for you.
#
# Usage:
# # Use KeepassX to create a CSV export /tmp/kpx.csv
# wget -O ~/Downloads/sites-using-cloudflare.zip https://github.com/pirate/sites-using-cloudflare/archive/master.zip
# python keepassx_domains.py < /tmp/kpx.csv > /tmp/kpx.sites
# unzip -c ~/Downloads/sites-using-cloudflare.zip sites-using-cloudflare-master/sorted_unique_cf.txt | grep -f /tmp/kpx.sites
import csv
import urlparse
import sys, os
# Per http://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
# Because csv apparently wants binary data
sys.stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0)
mycsv = csv.DictReader(sys.stdin)
sites = {}
for data in mycsv:
isites = set()
warnings = set()
for d in data['Title'], data['URL']:
if not d: continue
# Auto-generated passwords from LastPass that were never renamed
if d.lower().startswith('generated password for '):
d = d[len('generated password for '):]
#if 'sample entry' in d.lower() or d.lower().startswith('sample '):
# continue
# Parenthetical comments trailing the domain name
if '(' in d:
d = d[0:d.find('(')].strip()
# Strip URL components, keep hostname only
if '://' in d:
d = urlparse.urlparse(d).netloc
elif '/' in d:
d = d[0:d.find('/')].strip()
if ':' in d:
d = d[0:d.rfind(':')].strip()
if not d: continue
# Skip private IP addresses
if d.startswith('192.') or d.startswith('10.'):
warnings.add("Skipping private IP address '%s'" % (d,))
continue
# Handle '<realm> <hostname>' entries
if ' ' in d:
lw = d[d.rfind(' ')+1:]
if not d.endswith('.') and '.' in lw:
warnings.add("Taking last word from name '%s'" % (d,))
d = lw
else:
warnings.add("Not parsing name '%s'" % (d,))
continue
c = d.strip().split('.')
# Better handling of ccTLDs
tldsize = 1
        if len(c) > 1 and len(c[-1]) == 2 and c[-2] in ('co', 'com'):
tldsize = 2
for i in range(len(c)-tldsize):
s = '.'.join(c[i:])
isites.add(s)
if warnings:
sys.stderr.write('%d warnings, found %d domains\n %s\n' %
(len(warnings), len(isites), '\n '.join(warnings)))
for s in isites:
if s not in sites:
sites[s] = []
sites[s].append(data['Username'])
for site, usernames in sorted(sites.items(), key=lambda x: '.'.join(reversed(x[0].split('.')))):
print '\\(^\\|\\.\\)%s$' % (site,)
#print "%s\t%s" % (site, ' '.join(usernames))
|
marctc/django-blog-zinnia
|
refs/heads/develop
|
zinnia/tests/test_url_shortener.py
|
8
|
"""Test cases for Zinnia's url_shortener"""
import warnings
from django.test import TestCase
from django.test.utils import override_settings
from zinnia.url_shortener import get_url_shortener
from zinnia import url_shortener as us_settings
from zinnia.url_shortener.backends import default
class URLShortenerTestCase(TestCase):
"""Test cases for zinnia.url_shortener"""
def setUp(self):
self.original_backend = us_settings.URL_SHORTENER_BACKEND
def tearDown(self):
us_settings.URL_SHORTENER_BACKEND = self.original_backend
def test_get_url_shortener(self):
us_settings.URL_SHORTENER_BACKEND = 'mymodule.myclass'
with warnings.catch_warnings(record=True) as w:
self.assertEqual(get_url_shortener(), default.backend)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(
str(w[-1].message),
'mymodule.myclass backend cannot be imported')
us_settings.URL_SHORTENER_BACKEND = ('zinnia.tests.implementations.'
'custom_url_shortener')
with warnings.catch_warnings(record=True) as w:
self.assertEqual(get_url_shortener(), default.backend)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(
str(w[-1].message),
'This backend only exists for testing')
us_settings.URL_SHORTENER_BACKEND = 'zinnia.url_shortener'\
'.backends.default'
self.assertEqual(get_url_shortener(), default.backend)
class FakeEntry(object):
"""Fake entry with only 'pk' as attribute"""
def __init__(self, pk):
self.pk = pk
@override_settings(
ROOT_URLCONF='zinnia.tests.implementations.urls.default'
)
class UrlShortenerDefaultBackendTestCase(TestCase):
"""Tests cases for the default url shortener backend"""
def test_backend(self):
original_protocol = default.PROTOCOL
default.PROTOCOL = 'http'
entry = FakeEntry(1)
self.assertEqual(default.backend(entry),
'http://example.com/1/')
default.PROTOCOL = 'https'
entry = FakeEntry(100)
self.assertEqual(default.backend(entry),
'https://example.com/2S/')
default.PROTOCOL = original_protocol
def test_base36(self):
self.assertEqual(default.base36(1), '1')
self.assertEqual(default.base36(100), '2S')
self.assertEqual(default.base36(46656), '1000')
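# Worked example for the base36 expectations above (illustrative):
# 100 == 2 * 36 + 28, and digit 28 maps to 'S' in the 0-9A-Z alphabet,
# hence base36(100) == '2S' and the shortened path '/2S/'.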
|
vikitripathi/MB-MessApp-API
|
refs/heads/master
|
messApp/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py
|
2754
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
        # It is not necessary to receive all data to draw a conclusion.
        # For charset detection, a certain amount of data is enough.
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
        # We do not handle characters based on their original encoded bytes,
        # but convert each encoded character to a number, here called order.
        # This allows multiple encodings of a language to share one frequency
        # table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
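# A minimal usage sketch (commented out; not part of the module): feeding
# 2-byte sequences from a suspected EUC-KR stream into the analyser. The
# sample bytes below are hypothetical; in the real detector the coding
# state machine drives feed(), and with fewer frequent characters than
# MINIMUM_DATA_THRESHOLD the confidence stays at SURE_NO.
#
#     analyser = EUCKRDistributionAnalysis()
#     sample = b'\xb0\xa1' * 100  # a run of 2-byte EUC-KR characters
#     for i in range(0, len(sample), 2):
#         analyser.feed(sample[i:i + 2], 2)
#     print(analyser.get_confidence())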
|
alivecor/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/aggregate_ops_test.py
|
51
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for aggregate_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class AddNTest(test.TestCase):
# AddN special-cases adding the first M inputs to make (N - M) divisible by 8,
# after which it adds the remaining (N - M) tensors 8 at a time in a loop.
# Test N in [1, 10] so we check each special-case from 1 to 9 and one
# iteration of the loop.
_MAX_N = 10
def _supported_types(self):
if test.is_gpu_available():
return [dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128]
return [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128]
def _buildData(self, shape, dtype):
data = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testAddN(self):
np.random.seed(12345)
with self.test_session(use_gpu=True) as sess:
for dtype in self._supported_types():
for count in range(1, self._MAX_N + 1):
data = [self._buildData((2, 2), dtype) for _ in range(count)]
actual = sess.run(math_ops.add_n(data))
expected = np.sum(np.vstack(
[np.expand_dims(d, 0) for d in data]), axis=0)
tol = 5e-3 if dtype == dtypes.float16 else 5e-7
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
def testUnknownShapes(self):
np.random.seed(12345)
with self.test_session(use_gpu=True) as sess:
for dtype in self._supported_types():
data = self._buildData((2, 2), dtype)
for count in range(1, self._MAX_N + 1):
data_ph = array_ops.placeholder(dtype=dtype)
actual = sess.run(math_ops.add_n([data_ph] * count), {data_ph: data})
expected = np.sum(np.vstack([np.expand_dims(data, 0)] * count),
axis=0)
tol = 5e-3 if dtype == dtypes.float16 else 5e-7
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
if __name__ == "__main__":
test.main()
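# For reference, the behavior under test can be reproduced directly (a
# minimal sketch for the TF 1.x API, independent of the test harness above):
#
#     import numpy as np
#     import tensorflow as tf
#     data = [np.ones((2, 2), np.float32) * i for i in range(1, 4)]
#     with tf.Session() as sess:
#         print(sess.run(tf.add_n(data)))  # elementwise 1 + 2 + 3 == 6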
|
vbshah1992/microblog
|
refs/heads/master
|
flask/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/pymysql.py
|
18
|
# mysql/pymysql.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database via the pymysql adapter.
pymysql is available at:
http://code.google.com/p/pymysql/
Connecting
----------
Connect string::
mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply to
the pymysql driver as well.
"""
from sqlalchemy.dialects.mysql.mysqldb import MySQLDialect_mysqldb
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = 'pymysql'
description_encoding = None
@classmethod
def dbapi(cls):
return __import__('pymysql')
dialect = MySQLDialect_pymysql
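# A minimal connection sketch using the connect-string format documented
# above (host, credentials and database name are placeholders):
#
#     from sqlalchemy import create_engine
#     engine = create_engine("mysql+pymysql://user:password@localhost/mydb")
#     result = engine.execute("SELECT 1")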
|
Maccimo/intellij-community
|
refs/heads/master
|
python/testData/completion/fromPackageImport/pkg/__init__.py
|
12133432
| |
samvarankashyap/linch-pin
|
refs/heads/develop
|
linchpin/provision/roles/azure/library/__init__.py
|
12133432
| |
yokose-ks/edx-platform
|
refs/heads/gacco3/master
|
common/djangoapps/embargo/__init__.py
|
12133432
| |
GreenRecycleBin/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/__init__.py
|
12133432
| |
yk5/incubator-airflow
|
refs/heads/master
|
airflow/operators/generic_transfer.py
|
2
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.base_hook import BaseHook
class GenericTransfer(BaseOperator):
"""
    Moves data from one connection to another, assuming that they both
    provide the required methods in their respective hooks. The source hook
    needs to expose a `get_records` method, and the destination an
    `insert_rows` method.
This is meant to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database. (templated)
:type sql: str
:param destination_table: target table. (templated)
:type destination_table: str
:param source_conn_id: source connection
:type source_conn_id: str
    :param destination_conn_id: destination connection
:type destination_conn_id: str
:param preoperator: sql statement or list of statements to be
executed prior to loading the data. (templated)
:type preoperator: str or list of str
"""
template_fields = ('sql', 'destination_table', 'preoperator')
template_ext = ('.sql', '.hql',)
ui_color = '#b0f07c'
@apply_defaults
def __init__(
self,
sql,
destination_table,
source_conn_id,
destination_conn_id,
preoperator=None,
*args, **kwargs):
super(GenericTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
def execute(self, context):
source_hook = BaseHook.get_hook(self.source_conn_id)
self.log.info("Extracting data from %s", self.source_conn_id)
self.log.info("Executing: \n %s", self.sql)
results = source_hook.get_records(self.sql)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
if self.preoperator:
self.log.info("Running preoperator")
self.log.info(self.preoperator)
destination_hook.run(self.preoperator)
self.log.info("Inserting rows into %s", self.destination_conn_id)
destination_hook.insert_rows(table=self.destination_table, rows=results)
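# A minimal DAG usage sketch (connection ids, table names and SQL are
# placeholders, not part of this module):
#
#     transfer = GenericTransfer(
#         task_id='copy_users',
#         sql='SELECT * FROM users',
#         destination_table='users_copy',
#         source_conn_id='source_db',
#         destination_conn_id='dest_db',
#         preoperator='TRUNCATE TABLE users_copy',
#         dag=dag)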
|
townbull/keystone-dtrust
|
refs/heads/dev/domain-trusts
|
keystone/openstack/common/rpc/dispatcher.py
|
6
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Code for rpc message dispatching.
Messages that come in have a version number associated with them. RPC API
version numbers are in the form:
Major.Minor
For a given message with version X.Y, the receiver must be marked as able to
handle messages of version A.B, where:
A = X
B >= Y
The Major version number would be incremented for an almost completely new API.
The Minor version number would be incremented for backwards compatible changes
to an existing API. A backwards compatible change could be something like
adding a new method, adding an argument to an existing method (but not
requiring it), or changing the type for an existing argument (but still
handling the old type as well).
The conversion over to a versioned API must be done on both the client side and
server side of the API at the same time. However, as the code stands today,
there can be both versioned and unversioned APIs implemented in the same code
base.
EXAMPLES
========
Nova was the first project to use versioned rpc APIs. Consider the compute rpc
API as an example. The client side is in nova/compute/rpcapi.py and the server
side is in nova/compute/manager.py.
Example 1) Adding a new method.
-------------------------------
Adding a new method is a backwards compatible change. It should be added to
nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
have a specific version specified to indicate the minimum API version that must
be implemented for the method to be supported. For example::
def get_host_uptime(self, ctxt, host):
topic = _compute_topic(self.topic, ctxt, host, None)
return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
version='1.1')
In this case, version '1.1' is the first version that supported the
get_host_uptime() method.
Example 2) Adding a new parameter.
----------------------------------
Adding a new parameter to an rpc method can be made backwards compatible. The
RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
The implementation of the method must not expect the parameter to be present.::
def some_remote_method(self, arg1, arg2, newarg=None):
# The code needs to deal with newarg=None for cases
# where an older client sends a message without it.
pass
On the client side, the same changes should be made as in example 1. The
minimum version that supports the new parameter should be specified.
"""
from keystone.openstack.common.rpc import common as rpc_common
from keystone.openstack.common.rpc import serializer as rpc_serializer
class RpcDispatcher(object):
"""Dispatch rpc messages according to the requested API version.
This class can be used as the top level 'manager' for a service. It
contains a list of underlying managers that have an API_VERSION attribute.
"""
def __init__(self, callbacks, serializer=None):
"""Initialize the rpc dispatcher.
:param callbacks: List of proxy objects that are an instance
of a class with rpc methods exposed. Each proxy
object should have an RPC_API_VERSION attribute.
:param serializer: The Serializer object that will be used to
deserialize arguments before the method call and
to serialize the result after it returns.
"""
self.callbacks = callbacks
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcDispatcher, self).__init__()
def _deserialize_args(self, context, kwargs):
"""Helper method called to deserialize args before dispatch.
This calls our serializer on each argument, returning a new set of
args that have been deserialized.
:param context: The request context
:param kwargs: The arguments to be deserialized
:returns: A new set of deserialized args
"""
new_kwargs = dict()
for argname, arg in kwargs.iteritems():
new_kwargs[argname] = self.serializer.deserialize_entity(context,
arg)
return new_kwargs
def dispatch(self, ctxt, version, method, namespace, **kwargs):
"""Dispatch a message based on a requested version.
:param ctxt: The request context
:param version: The requested API version from the incoming message
:param method: The method requested to be called by the incoming
message.
:param namespace: The namespace for the requested method. If None,
the dispatcher will look for a method on a callback
object with no namespace set.
:param kwargs: A dict of keyword arguments to be passed to the method.
:returns: Whatever is returned by the underlying method that gets
called.
"""
if not version:
version = '1.0'
had_compatible = False
for proxyobj in self.callbacks:
# Check for namespace compatibility
try:
cb_namespace = proxyobj.RPC_API_NAMESPACE
except AttributeError:
cb_namespace = None
if namespace != cb_namespace:
continue
# Check for version compatibility
try:
rpc_api_version = proxyobj.RPC_API_VERSION
except AttributeError:
rpc_api_version = '1.0'
is_compatible = rpc_common.version_is_compatible(rpc_api_version,
version)
had_compatible = had_compatible or is_compatible
if not hasattr(proxyobj, method):
continue
if is_compatible:
kwargs = self._deserialize_args(ctxt, kwargs)
result = getattr(proxyobj, method)(ctxt, **kwargs)
return self.serializer.serialize_entity(ctxt, result)
if had_compatible:
raise AttributeError("No such RPC function '%s'" % method)
else:
raise rpc_common.UnsupportedRpcVersion(version=version)
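# A minimal usage sketch (the proxy class and the request context `ctxt`
# are hypothetical). A handler marked 1.1 is compatible with requested
# versions 1.0 and 1.1 (same major, minor <= handler's minor):
#
#     class ComputeCallbacks(object):
#         RPC_API_VERSION = '1.1'
#
#         def get_host_uptime(self, ctxt, host):
#             return 42
#
#     dispatcher = RpcDispatcher([ComputeCallbacks()])
#     dispatcher.dispatch(ctxt, '1.0', 'get_host_uptime', None, host='node1')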
|
matrix-org/sydent
|
refs/heads/main
|
sydent/terms/terms.py
|
1
|
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Dict, List, Optional, Set
import yaml
logger = logging.getLogger(__name__)
class Terms:
def __init__(self, yamlObj: Optional[Dict[str, Any]]) -> None:
"""
:param yamlObj: The parsed YAML.
"""
self._rawTerms = yamlObj
def getMasterVersion(self) -> Optional[str]:
"""
:return: The global (master) version of the terms, or None if there
are no terms of service for this server.
"""
version = None if self._rawTerms is None else self._rawTerms["master_version"]
# Ensure we're dealing with unicode.
if version and isinstance(version, bytes):
version = version.decode("UTF-8")
return version
def getForClient(self) -> Dict[str, dict]:
"""
:return: A dict which value for the "policies" key is a dict which contains the
"docs" part of the terms' YAML. That nested dict is empty if no terms.
"""
policies = {}
if self._rawTerms is not None:
for docName, doc in self._rawTerms["docs"].items():
policies[docName] = {
"version": doc["version"],
}
policies[docName].update(doc["langs"])
return {"policies": policies}
def getUrlSet(self) -> Set[str]:
"""
:return: All the URLs for the terms in a set. Empty set if no terms.
"""
urls = set()
if self._rawTerms is not None:
for docName, doc in self._rawTerms["docs"].items():
for langName, lang in doc["langs"].items():
url = lang["url"]
# Ensure we're dealing with unicode.
if url and isinstance(url, bytes):
url = url.decode("UTF-8")
urls.add(url)
return urls
def urlListIsSufficient(self, urls: List[str]) -> bool:
"""
Checks whether the provided list of URLs (which represents the list of terms
accepted by the user) is enough to allow the creation of the user's account.
:param urls: The list of URLs of terms the user has accepted.
:return: Whether the list is sufficient to allow the creation of the user's
account.
"""
agreed = set()
urlset = set(urls)
if self._rawTerms is not None:
for docName, doc in self._rawTerms["docs"].items():
for lang in doc["langs"].values():
if lang["url"] in urlset:
agreed.add(docName)
break
required = set(self._rawTerms["docs"].keys())
return agreed == required
def get_terms(sydent) -> Optional[Terms]:
"""Read and parse terms as specified in the config.
    :return: A Terms object, or None if the terms file could not be read.
"""
try:
termsYaml = None
termsPath = sydent.cfg.get("general", "terms.path")
if termsPath == "":
return Terms(None)
with open(termsPath) as fp:
termsYaml = yaml.full_load(fp)
if "master_version" not in termsYaml:
raise Exception("No master version")
if "docs" not in termsYaml:
raise Exception("No 'docs' key in terms")
for docName, doc in termsYaml["docs"].items():
if "version" not in doc:
raise Exception("'%s' has no version" % (docName,))
if "langs" not in doc:
raise Exception("'%s' has no langs" % (docName,))
for langKey, lang in doc["langs"].items():
if "name" not in lang:
raise Exception(
"lang '%s' of doc %s has no name" % (langKey, docName)
)
if "url" not in lang:
raise Exception(
"lang '%s' of doc %s has no url" % (langKey, docName)
)
return Terms(termsYaml)
except Exception:
logger.exception(
"Couldn't read terms file '%s'", sydent.cfg.get("general", "terms.path")
)
return None
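# For reference, a minimal terms YAML that satisfies the validation above
# (document names and URLs are illustrative only):
#
#     master_version: "1.0"
#     docs:
#       privacy_policy:
#         version: "1.0"
#         langs:
#           en:
#             name: "Privacy Policy"
#             url: "https://example.com/privacy/en/1.0.html"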
|
mcus/SickRage
|
refs/heads/master
|
lib/hachoir_parser/audio/au.py
|
95
|
"""
AU audio file parser
Author: Victor Stinner
Creation: 12 july 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import UInt32, Enum, String, RawBytes
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import displayHandler, filesizeHandler
from hachoir_core.tools import createDict, humanFrequency
class AuFile(Parser):
PARSER_TAGS = {
"id": "sun_next_snd",
"category": "audio",
"file_ext": ("au", "snd"),
"mime": (u"audio/basic",),
"min_size": 24*8,
"magic": ((".snd", 0),),
"description": "Sun/NeXT audio"
}
endian = BIG_ENDIAN
CODEC_INFO = {
1: (8, u"8-bit ISDN u-law"),
2: (8, u"8-bit linear PCM"),
3: (16, u"16-bit linear PCM"),
4: (24, u"24-bit linear PCM"),
5: (32, u"32-bit linear PCM"),
6: (32, u"32-bit IEEE floating point"),
7: (64, u"64-bit IEEE floating point"),
8: (None, u"Fragmented sample data"),
9: (None, u"DSP program"),
10: (8, u"8-bit fixed point"),
11: (16, u"16-bit fixed point"),
12: (24, u"24-bit fixed point"),
13: (32, u"32-bit fixed point"),
18: (16, u"16-bit linear with emphasis"),
19: (16, u"16-bit linear compressed"),
20: (16, u"16-bit linear with emphasis and compression"),
21: (None, u"Music kit DSP commands"),
23: (None, u"4-bit ISDN u-law compressed (CCITT G.721 ADPCM)"),
24: (None, u"ITU-T G.722 ADPCM"),
25: (None, u"ITU-T G.723 3-bit ADPCM"),
26: (None, u"ITU-T G.723 5-bit ADPCM"),
27: (8, u"8-bit ISDN A-law"),
}
    # Create bit rate and codec name dictionaries
BITS_PER_SAMPLE = createDict(CODEC_INFO, 0)
CODEC_NAME = createDict(CODEC_INFO, 1)
VALID_NB_CHANNEL = set((1,2)) # FIXME: 4, 5, 7, 8 channels are supported?
def validate(self):
if self.stream.readBytes(0, 4) != ".snd":
return "Wrong file signature"
if self["channels"].value not in self.VALID_NB_CHANNEL:
return "Invalid number of channel"
return True
def getBitsPerSample(self):
"""
        Get the bit rate (number of bits per sample per channel);
        may return None if it cannot be computed.
"""
return self.BITS_PER_SAMPLE.get(self["codec"].value)
def createFields(self):
yield String(self, "signature", 4, 'Format signature (".snd")', charset="ASCII")
yield UInt32(self, "data_ofs", "Data offset")
yield filesizeHandler(UInt32(self, "data_size", "Data size"))
yield Enum(UInt32(self, "codec", "Audio codec"), self.CODEC_NAME)
yield displayHandler(UInt32(self, "sample_rate", "Number of samples/second"), humanFrequency)
yield UInt32(self, "channels", "Number of interleaved channels")
size = self["data_ofs"].value - self.current_size // 8
if 0 < size:
yield String(self, "info", size, "Information", strip=" \0", charset="ISO-8859-1")
size = min(self["data_size"].value, (self.size - self.current_size) // 8)
yield RawBytes(self, "audio_data", size, "Audio data")
def createContentSize(self):
return (self["data_ofs"].value + self["data_size"].value) * 8
|
iamahuman/angr
|
refs/heads/master
|
angr/procedures/libc/puts.py
|
1
|
import angr
from angr.sim_type import SimTypeString, SimTypeInt
######################################
# puts
######################################
class puts(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, string):
self.argument_types = {0: self.ty_ptr(SimTypeString())}
self.return_type = SimTypeInt(32, True)
stdout = self.state.posix.get_fd(1)
if stdout is None:
return -1
strlen = angr.SIM_PROCEDURES['libc']['strlen']
length = self.inline_call(strlen, string).ret_expr
out = stdout.write(string, length)
stdout.write_data(self.state.solver.BVV(b'\n'))
return out + 1
|
m11s/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/scipy/optimize/setupscons.py
|
51
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('optimize',parent_package, top_path)
config.add_sconscript('SConstruct')
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
eahneahn/free
|
refs/heads/master
|
djangoproject/core/migrations/0036_receiveEmail_fields.py
|
3
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for info in orm.UserInfo.objects.all():
info.receiveEmail_issue_comments = info.receiveAllEmail
info.receiveEmail_issue_work = info.receiveAllEmail
info.receiveEmail_issue_offer = info.receiveAllEmail
info.receiveEmail_issue_payment = info.receiveAllEmail
info.receiveEmail_announcements = info.receiveAllEmail
info.save()
print('update userinfo %s receiveEmail %s' % (info.id, info.receiveAllEmail))
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bitcoin_frespo.moneysent': {
'Meta': {'object_name': 'MoneySent'},
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'from_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'to_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'transaction_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'})
},
'bitcoin_frespo.receiveaddress': {
'Meta': {'object_name': 'ReceiveAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.issue': {
'Meta': {'object_name': 'Issue'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updatedDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'core.issuecomment': {
'Meta': {'object_name': 'IssueComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"})
},
'core.issuecommenthistevent': {
'Meta': {'object_name': 'IssueCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.IssueComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.issuewatch': {
'Meta': {'object_name': 'IssueWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.offer': {
'Meta': {'object_name': 'Offer'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offercomment': {
'Meta': {'object_name': 'OfferComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"})
},
'core.offercommenthistevent': {
'Meta': {'object_name': 'OfferCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OfferComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.offerhistevent': {
'Meta': {'object_name': 'OfferHistEvent'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offerwatch': {
'Meta': {'object_name': 'OfferWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.payment': {
'Meta': {'object_name': 'Payment'},
'bitcoin_receive_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bitcoin_frespo.ReceiveAddress']", 'null': 'True'}),
'bitcoin_transaction_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'fee': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'paykey': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'total_bitcoin_received': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'})
},
'core.paymenthistevent': {
'Meta': {'object_name': 'PaymentHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.paymentpart': {
'Meta': {'object_name': 'PaymentPart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'money_sent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bitcoin_frespo.MoneySent']", 'null': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"})
},
'core.project': {
'Meta': {'object_name': 'Project'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'homeURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'core.solution': {
'Meta': {'object_name': 'Solution'},
'accepting_payments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.solutionhistevent': {
'Meta': {'object_name': 'SolutionHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'bitcoin_receive_address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'brazilianPaypal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_from_userlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_paypal_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_primary_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
'paypal_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferred_language_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'realName': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'receiveAllEmail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_offer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_payment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_work': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'screenName': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
symmetrical = True
|
cmouse/buildbot
|
refs/heads/master
|
master/buildbot/test/util/www.py
|
3
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import os
import pkg_resources
from io import StringIO
from urllib.parse import parse_qs
from urllib.parse import unquote as urlunquote
from uuid import uuid1
import mock
from twisted.internet import defer
from twisted.web import server
from buildbot.test.fake import fakemaster
from buildbot.util import bytes2unicode
from buildbot.util import unicode2bytes
from buildbot.www import auth
from buildbot.www import authz
class FakeSession:
def __init__(self):
self.user_info = {"anonymous": True}
def updateSession(self, request):
pass
class FakeRequest:
written = b''
finished = False
redirected_to = None
rendered_resource = None
failure = None
method = b'GET'
path = b'/req.path'
responseCode = 200
def __init__(self, path=None):
self.headers = {}
self.input_headers = {}
self.prepath = []
x = path.split(b'?', 1)
if len(x) == 1:
self.path = path
self.args = {}
else:
path, argstring = x
self.path = path
self.args = parse_qs(argstring, 1)
self.uri = self.path
self.postpath = []
for p in path[1:].split(b'/'):
path = urlunquote(bytes2unicode(p))
self.postpath.append(unicode2bytes(path))
self.deferred = defer.Deferred()
def write(self, data):
self.written = self.written + data
def redirect(self, url):
self.redirected_to = url
def render(self, rsrc):
        self.rendered_resource = rsrc
        self.deferred.callback(self.rendered_resource)
def finish(self):
self.finished = True
if self.redirected_to is not None:
self.deferred.callback(dict(redirected=self.redirected_to))
else:
self.deferred.callback(self.written)
def setResponseCode(self, code, text=None):
# twisted > 16 started to assert this
assert isinstance(code, int)
self.responseCode = code
self.responseText = text
def setHeader(self, hdr, value):
assert isinstance(hdr, bytes)
assert isinstance(value, bytes)
self.headers.setdefault(hdr, []).append(value)
def getHeader(self, key):
assert isinstance(key, bytes)
return self.input_headers.get(key)
def processingFailed(self, f):
self.deferred.errback(f)
def notifyFinish(self):
d = defer.Deferred()
@self.deferred.addBoth
def finished(res):
d.callback(res)
return res
return d
def getSession(self):
return self.session
class RequiresWwwMixin:
# mix this into a TestCase to skip if buildbot-www is not installed
if not list(pkg_resources.iter_entry_points('buildbot.www', 'base')):
if 'BUILDBOT_TEST_REQUIRE_WWW' in os.environ:
raise RuntimeError('$BUILDBOT_TEST_REQUIRE_WWW is set but '
'buildbot-www is not installed')
skip = 'buildbot-www not installed'
class WwwTestMixin(RequiresWwwMixin):
UUID = str(uuid1())
def make_master(self, url=None, **kwargs):
master = fakemaster.make_master(self, wantData=True)
self.master = master
master.www = mock.Mock() # to handle the resourceNeedsReconfigs call
master.www.getUserInfos = lambda _: getattr(
self.master.session, "user_info", {"anonymous": True})
cfg = dict(port=None, auth=auth.NoAuth(), authz=authz.Authz())
cfg.update(kwargs)
master.config.www = cfg
if url is not None:
master.config.buildbotURL = url
self.master.session = FakeSession()
self.master.authz = cfg["authz"]
self.master.authz.setMaster(self.master)
return master
def make_request(self, path=None, method=b'GET'):
self.request = FakeRequest(path)
self.request.session = self.master.session
self.request.method = method
return self.request
def render_resource(self, rsrc, path=b'/', accept=None, method=b'GET',
origin=None, access_control_request_method=None,
extraHeaders=None, request=None):
if not request:
request = self.make_request(path, method=method)
if accept:
request.input_headers[b'accept'] = accept
if origin:
request.input_headers[b'origin'] = origin
if access_control_request_method:
request.input_headers[b'access-control-request-method'] = \
access_control_request_method
if extraHeaders is not None:
request.input_headers.update(extraHeaders)
rv = rsrc.render(request)
if rv != server.NOT_DONE_YET:
if rv is not None:
request.write(rv)
request.finish()
return request.deferred
@defer.inlineCallbacks
def render_control_resource(self, rsrc, path=b'/', params=None,
requestJson=None, action="notfound", id=None,
content_type=b'application/json'):
# pass *either* a request or postpath
if params is None:
params = {}
id = id or self.UUID
request = self.make_request(path)
request.method = b"POST"
request.content = StringIO(requestJson or json.dumps(
{"jsonrpc": "2.0", "method": action, "params": params, "id": id}))
request.input_headers = {b'content-type': content_type}
rv = rsrc.render(request)
if rv == server.NOT_DONE_YET:
rv = yield request.deferred
res = json.loads(bytes2unicode(rv))
self.assertIn("jsonrpc", res)
self.assertEqual(res["jsonrpc"], "2.0")
if not requestJson:
# requestJson is used for invalid requests, so don't expect ID
self.assertIn("id", res)
self.assertEqual(res["id"], id)
def assertRequest(self, content=None, contentJson=None, contentType=None,
responseCode=None, contentDisposition=None, headers=None):
if headers is None:
headers = {}
got, exp = {}, {}
if content is not None:
got['content'] = self.request.written
exp['content'] = content
if contentJson is not None:
got['contentJson'] = json.loads(
bytes2unicode(self.request.written))
exp['contentJson'] = contentJson
if contentType is not None:
got['contentType'] = self.request.headers[b'content-type']
exp['contentType'] = [contentType]
if responseCode is not None:
got['responseCode'] = str(self.request.responseCode)
exp['responseCode'] = str(responseCode)
for header, value in headers.items():
got[header] = self.request.headers.get(header)
exp[header] = value
self.assertEqual(got, exp)
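# A minimal test sketch using the mixin (MyResource and the imports of
# unittest/defer are assumed, not part of this module):
#
#     class MyResourceTests(WwwTestMixin, unittest.TestCase):
#         @defer.inlineCallbacks
#         def test_get(self):
#             master = self.make_master(url='http://localhost:8080/')
#             rsrc = MyResource(master)
#             yield self.render_resource(rsrc, b'/')
#             self.assertRequest(contentType=b'text/html', responseCode=200)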
|
leokhachatorians/Torrent-Aggregator
|
refs/heads/master
|
torrent_class.py
|
1
|
import requests
from bs4 import BeautifulSoup
from lxml import html
class Torrent():
    # For all children of the Torrent class the initial database number
    # will ALWAYS be one; there is no reason to do it any other way.
db_num = 1
page_num = None
r = None
soup = None
tree = None
amount = None
titles = None
seeds = None
leeches = None
info = None
magnet_link = None
website_name = None
# Used only for the websites which don't have an access to the magnet link
# on the initial page loaded
magnet_urls = None
def create_complete_url(self, page_number, search_term, Pirate=False, Isohunt=False, Kickass=False):
"""
Arguments:
page_number - The current page location
search_term - The term to be searched
[Pirate, Isohunt, Kickass] - Which url format to create
Description:
            Creates and returns the complete url from which to begin parsing
            data, depending on which torrent website flag is set to True.
        """
if Isohunt:
url = 'https://isohunt.to/torrents/?ihq={0}&Torrent_sort=seeders.desc&Torrent_page={1}'.format(
search_term,
page_number)
elif Pirate:
url = "https://thepiratebay.gd/search/{0}/{1}".format(
search_term,
page_number)
elif Kickass:
url = 'https://kat.cr/usearch/{0}/{1}'.format(
search_term,
page_number)
return url
def create_requests(self, url):
"""
Arguments:
Url - The Url of which to create the requests of
Description:
Creates and returns a requests object of the given url
"""
r = requests.get(url)
return r
def create_soup(self, r, html_parser=False):
"""
Arguments:
r - The requests object
html_parser - Set to true in order to create the soup object with
an html.parser argument.
Description:
Creates and returns the BeautifulSoup object of the given
requests input.
"""
if html_parser:
soup = BeautifulSoup(r.text, 'html.parser')
else:
soup = BeautifulSoup(r.text)
return soup
def create_tree(self, r):
"""
Arguments:
r - The requests object
Description:
Creates and returns the LXML HTML Tree of the given requests
input
"""
tree = html.fromstring(r.text)
return tree
def increase_page_num(self, page_num, db_num, amount):
"""
Arguments:
Page_Num - The argument which refers to the current page number,
this variable will also be returned back to the calling
class.
Db_Num - The current database location
Amount - The amount of which to increase the page number on
the url, as some sites have various url requirements on
what constitutes going to the next page
Description:
            Returns a tuple containing the increased page number and
            database number for whichever subclass of Torrent uses it.
"""
page_num += amount
db_num += 1
return (page_num, db_num)
def decrease_page_num(self, page_num, db_num, amount, lowest_value):
"""
Arguments:
Page_Num - The argument which refers to the current page number,
this variable will also be returned back to the calling
class.
Db_Num - The current database location
Amount - The amount of which to decrease the page number on
the url.
Lowest Value - The lowest amount the page number can go, also
the actual first page of any search term.
Description:
            Returns a tuple containing the decreased page number and
            database number for whichever subclass of Torrent uses it.
"""
if not page_num == lowest_value:
page_num -= amount
db_num -= 1
return (page_num, db_num)
def clear_data(self):
"""
        Clears all the data gathered from the page.
"""
self.r = None
self.soup = None
self.tree = None
self.titles = None
self.seeds = None
self.leeches = None
self.info = None
self.magnet_link = None
self.website_name = None
        self.magnet_urls = None
def bs4_grabber(self, the_soup, class_name=None, others=None):
"""Parses a given BeautifulSoup thing and pulls out the text within
the class given, className must be a string. If not a class name, using
the others keyword can pull out various id tags and whatnot"""
output = []
if class_name:
inside = the_soup.find_all(class_=class_name)
for stuff in inside:
output.append(stuff.text)
return output
else:
inside = the_soup.find_all(others)
for stuff in inside:
output.append(stuff.text)
return output
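# A minimal subclass sketch (the CSS class name and parsing rules are
# illustrative; each real site needs its own selectors):
#
#     class PirateBay(Torrent):
#         website_name = 'The Pirate Bay'
#
#         def search(self, term, page=0):
#             url = self.create_complete_url(page, term, Pirate=True)
#             soup = self.create_soup(self.create_requests(url))
#             self.titles = self.bs4_grabber(soup, class_name='detName')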
|
robertmattmueller/sdac-compiler
|
refs/heads/master
|
sympy/mpmath/libmp/libmpf.py
|
23
|
"""
Low-level functions for arbitrary-precision floating-point arithmetic.
"""
__docformat__ = 'plaintext'
import math
from bisect import bisect
import sys
# Importing random is slow
#from random import getrandbits
getrandbits = None
from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE,
BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils)
from .libintmath import (giant_steps,
trailtable, bctable, lshift, rshift, bitcount, trailing,
sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
bin_to_radix)
# We don't pickle tuples directly for the following reasons:
# 1: pickle uses str() for ints, which is inefficient when they are large
# 2: pickle doesn't work for gmpy mpzs
# Both problems are solved by using hex()
if BACKEND == 'sage':
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man), exp, bc
else:
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man)[2:], exp, bc
def from_pickable(x):
sign, man, exp, bc = x
return (sign, MPZ(man, 16), exp, bc)
class ComplexResult(ValueError):
pass
try:
intern
except NameError:
intern = lambda x: x
# All supported rounding modes
round_nearest = intern('n')
round_floor = intern('f')
round_ceiling = intern('c')
round_up = intern('u')
round_down = intern('d')
round_fast = round_down
def prec_to_dps(n):
"""Return number of accurate decimals that can be represented
with a precision of n bits."""
return max(1, int(round(int(n)/3.3219280948873626)-1))
def dps_to_prec(n):
"""Return the number of bits required to represent n decimals
accurately."""
return max(1, int(round((int(n)+1)*3.3219280948873626)))
def repr_dps(n):
"""Return the number of decimal digits required to represent
a number with n-bit precision so that it can be uniquely
reconstructed from the representation."""
dps = prec_to_dps(n)
if dps == 15:
return 17
return dps + 3
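# Example values (a quick sanity check, not part of the library): an
# IEEE-754 double has a 53-bit mantissa, so
#     prec_to_dps(53) -> 15   (about 15 accurate decimal digits)
#     dps_to_prec(15) -> 53   (bits needed to represent 15 digits)
#     repr_dps(53)    -> 17   (digits for a round-trippable repr)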
#----------------------------------------------------------------------------#
# Some commonly needed float values #
#----------------------------------------------------------------------------#
# Regular number format:
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
fzero = (0, MPZ_ZERO, 0, 0)
fnzero = (1, MPZ_ZERO, 0, 0)
fone = (0, MPZ_ONE, 0, 1)
fnone = (1, MPZ_ONE, 0, 1)
ftwo = (0, MPZ_ONE, 1, 1)
ften = (0, MPZ_FIVE, 1, 3)
fhalf = (0, MPZ_ONE, -1, 1)
# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent
fnan = (0, MPZ_ZERO, -123, -1)
finf = (0, MPZ_ZERO, -456, -2)
fninf = (1, MPZ_ZERO, -789, -3)
# Was 1e1000; this is broken in Python 2.4
math_float_inf = 1e300 * 1e300
#----------------------------------------------------------------------------#
# Rounding #
#----------------------------------------------------------------------------#
# This function can be used to round a mantissa generally. However,
# we will try to do most rounding inline for efficiency.
def round_int(x, n, rnd):
if rnd == round_nearest:
if x >= 0:
t = x >> (n-1)
if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
return (t>>1)+1
else:
return t>>1
else:
return -round_int(-x, n, rnd)
if rnd == round_floor:
return x >> n
if rnd == round_ceiling:
return -((-x) >> n)
if rnd == round_down:
if x >= 0:
return x >> n
return -((-x) >> n)
if rnd == round_up:
if x >= 0:
return -((-x) >> n)
return x >> n
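# Example: with n = 1 (dropping one bit, i.e. dividing by 2),
#     round_int(3, 1, round_nearest) == 2   (1.5 rounds up to even)
#     round_int(5, 1, round_nearest) == 2   (2.5 rounds down to even)
# illustrating the round-half-to-even behavior of round_nearest.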
# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
def __getitem__(self, n):
return (MPZ_ONE<<(n-1))-1
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
h_mask = [h_mask_big(), h_mask_small]
# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
round_down:(1,1), round_up:(0,0)}
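# Illustrative sketch (editor's example, not upstream code): round_int
# computes x/2**n rounded in the requested direction:
#     >>> round_int(11, 2, round_nearest)   # 11/4 = 2.75 rounds to 3
#     3
#     >>> round_int(11, 2, round_floor)     # floor(2.75) = 2
#     2
#     >>> round_int(-11, 2, round_floor)    # floor(-2.75) = -3
#     -3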
#----------------------------------------------------------------------------#
# Normalization of raw mpfs #
#----------------------------------------------------------------------------#
# This function is called almost every time an mpf is created.
# It has been optimized accordingly.
def _normalize(sign, man, exp, bc, prec, rnd):
"""
Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
normalized mantissa. The mantissa is rounded in the specified
direction if its size exceeds the precision. Trailing zero bits
are also stripped from the mantissa to ensure that the
representation is canonical.
Conditions on the input:
* The input must represent a regular (finite) number
* The sign bit must be 0 or 1
* The mantissa must be positive
* The exponent must be an integer
* The bitcount must be exact
If these conditions are not met, use from_man_exp, mpf_pos, or any
of the conversion functions to create normalized raw mpf tuples.
"""
if not man:
return fzero
# Cut mantissa down to size if larger than target precision
n = bc - prec
if n > 0:
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
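# Illustrative sketch (editor's example, not upstream code): rounding
# 50 = 0b110010 to 4 bits ties to the even neighbor 48, and the trailing
# zeros are then stripped, giving the canonical tuple for 3 * 2**4
# (mantissa shown as a plain int; it is an mpz under gmpy):
#     >>> _normalize(0, MPZ(50), 0, 6, 4, round_nearest)
#     (0, 3, 4, 2)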
def _normalize1(sign, man, exp, bc, prec, rnd):
"""same as normalize, but with the added condition that
man is odd or zero
"""
if not man:
return fzero
if bc <= prec:
return sign, man, exp, bc
n = bc - prec
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
try:
_exp_types = (int, long)
except NameError:
_exp_types = (int,)
def strict_normalize(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
return _normalize(sign, man, exp, bc, prec, rnd)
def strict_normalize1(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
assert (not man) or (man & 1)
return _normalize1(sign, man, exp, bc, prec, rnd)
if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
_normalize = gmpy._mpmath_normalize
_normalize1 = gmpy._mpmath_normalize
if BACKEND == 'sage':
_normalize = _normalize1 = sage_utils.normalize
if STRICT:
normalize = strict_normalize
normalize1 = strict_normalize1
else:
normalize = _normalize
normalize1 = _normalize1
#----------------------------------------------------------------------------#
# Conversion functions #
#----------------------------------------------------------------------------#
def from_man_exp(man, exp, prec=None, rnd=round_fast):
"""Create raw mpf from (man, exp) pair. The mantissa may be signed.
If no precision is specified, the mantissa is stored exactly."""
man = MPZ(man)
sign = 0
if man < 0:
sign = 1
man = -man
if man < 1024:
bc = bctable[int(man)]
else:
bc = bitcount(man)
if not prec:
if not man:
return fzero
if not man & 1:
if man & 2:
return (sign, man >> 1, exp + 1, bc - 1)
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
return (sign, man, exp, bc)
return normalize(sign, man, exp, bc, prec, rnd)
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))
if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy):
from_man_exp = gmpy._mpmath_create
if BACKEND == 'sage':
from_man_exp = sage_utils.from_man_exp
def from_int(n, prec=0, rnd=round_fast):
"""Create a raw mpf from an integer. If no precision is specified,
the mantissa is stored exactly."""
if not prec:
if n in int_cache:
return int_cache[n]
return from_man_exp(n, 0, prec, rnd)
def to_man_exp(s):
"""Return (man, exp) of a raw mpf. Raise an error if inf/nan."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("mantissa and exponent are undefined for %s" % man)
return man, exp
def to_int(s, rnd=None):
"""Convert a raw mpf to the nearest int. Rounding is done down by
default (same as int(float) in Python), but can be changed. If the
input is inf/nan, an exception is raised."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("cannot convert %s to int" % man)
if exp >= 0:
if sign:
return (-man) << exp
return man << exp
# Make default rounding fast
if not rnd:
if sign:
return -(man >> (-exp))
else:
return man >> (-exp)
if sign:
return round_int(-man, -exp, rnd)
else:
return round_int(man, -exp, rnd)
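# Illustrative sketch (editor's example, not upstream code): integers
# round-trip exactly; note the canonical form with trailing zeros
# stripped (the mantissa is a plain int here, an mpz under gmpy):
#     >>> from_int(10)          # 10 = 5 * 2**1
#     (0, 5, 1, 3)
#     >>> to_int(from_int(10))
#     10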
def mpf_round_int(s, rnd):
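    """Round the raw mpf s to an integer, returned as a raw mpf. Only
    the floor, ceiling, and nearest rounding directions are supported."""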
sign, man, exp, bc = s
if (not man) and exp:
return s
if exp >= 0:
return s
mag = exp+bc
if mag < 1:
if rnd == round_ceiling:
if sign: return fzero
else: return fone
elif rnd == round_floor:
if sign: return fnone
else: return fzero
elif rnd == round_nearest:
if mag < 0 or man == MPZ_ONE: return fzero
elif sign: return fnone
else: return fone
else:
raise NotImplementedError
return mpf_pos(s, min(bc, mag), rnd)
def mpf_floor(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_floor)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_ceil(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_ceiling)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_nint(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_nearest)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_frac(s, prec=0, rnd=round_fast):
return mpf_sub(s, mpf_floor(s), prec, rnd)
def from_float(x, prec=53, rnd=round_fast):
"""Create a raw mpf from a Python float, rounding if necessary.
If prec >= 53, the result is guaranteed to represent exactly the
same number as the input. If prec is not specified, use prec=53."""
# frexp only raises an exception for nan on some platforms
if x != x:
return fnan
# in Python2.5 math.frexp gives an exception for float infinity
# in Python2.6 it returns (float infinity, 0)
try:
m, e = math.frexp(x)
except:
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return fnan
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
def to_float(s, strict=False):
"""
Convert a raw mpf to a Python float. The result is exact if the
bitcount of s is <= 53 and no underflow/overflow occurs.
If the number is too large or too small to represent as a regular
float, it will be converted to inf or 0.0. Setting strict=True
forces an OverflowError to be raised instead.
"""
sign, man, exp, bc = s
if not man:
if s == fzero: return 0.0
if s == finf: return math_float_inf
if s == fninf: return -math_float_inf
return math_float_inf/math_float_inf
if sign:
man = -man
try:
if bc < 100:
return math.ldexp(man, exp)
# Try resizing the mantissa. Overflow may still happen here.
n = bc - 53
m = man >> n
return math.ldexp(m, exp + n)
except OverflowError:
if strict:
raise
# Overflow to infinity
if exp + bc > 0:
if sign:
return -math_float_inf
else:
return math_float_inf
# Underflow to zero
return 0.0
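# Illustrative sketch (editor's example, not upstream code): conversion
# to and from Python floats is exact for values representable in a double:
#     >>> from_float(0.25)      # 0.25 = 1 * 2**-2
#     (0, 1, -2, 1)
#     >>> to_float(from_float(1.5))
#     1.5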
def from_rational(p, q, prec, rnd=round_fast):
"""Create a raw mpf from a rational number p/q, round if
necessary."""
return mpf_div(from_int(p), from_int(q), prec, rnd)
def to_rational(s):
"""Convert a raw mpf to a rational number. Return integers (p, q)
such that s = p/q exactly."""
sign, man, exp, bc = s
if sign:
man = -man
if bc == -1:
raise ValueError("cannot convert %s to a rational number" % man)
if exp >= 0:
return man * (1<<exp), 1
else:
return man, 1<<(-exp)
def to_fixed(s, prec):
"""Convert a raw mpf to a fixed-point big integer"""
sign, man, exp, bc = s
offset = exp + prec
if sign:
if offset >= 0: return (-man) << offset
else: return (-man) >> (-offset)
else:
if offset >= 0: return man << offset
else: return man >> (-offset)
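# Illustrative sketch (editor's example, not upstream code):
#     >>> to_rational(fhalf)    # 0.5 == 1/2
#     (1, 2)
#     >>> to_fixed(fhalf, 8)    # 0.5 with 8 fractional bits
#     128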
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Arithmetic operations, etc. #
#----------------------------------------------------------------------------#
def mpf_rand(prec):
"""Return a raw mpf chosen randomly from [0, 1), with prec bits
in the mantissa."""
global getrandbits
if not getrandbits:
import random
getrandbits = random.getrandbits
return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
def mpf_eq(s, t):
"""Test equality of two raw mpfs. This is simply tuple comparison
unless either number is nan, in which case the result is False."""
if not s[1] or not t[1]:
if s == fnan or t == fnan:
return False
return s == t
def mpf_hash(s):
    # Duplicate the new hash algorithm introduced in Python 3.2.
    # (sys.version_info is compared as a tuple; comparing sys.version as a
    # string breaks for Python 3.10 and later.)
    if sys.version_info >= (3, 2):
ssign, sman, sexp, sbc = s
# Handle special numbers
if not sman:
if s == fnan: return sys.hash_info.nan
if s == finf: return sys.hash_info.inf
if s == fninf: return -sys.hash_info.inf
h = sman % HASH_MODULUS
if sexp >= 0:
sexp = sexp % HASH_BITS
else:
sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS)
h = (h << sexp) % HASH_MODULUS
if ssign: h = -h
        if h == -1: h = -2
return int(h)
else:
try:
# Try to be compatible with hash values for floats and ints
return hash(to_float(s, strict=1))
except OverflowError:
# We must unfortunately sacrifice compatibility with ints here.
# We could do hash(man << exp) when the exponent is positive, but
# this would cause unreasonable inefficiency for large numbers.
return hash(s)
def mpf_cmp(s, t):
"""Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
and 1 if s > t. (Same convention as Python's cmp() function.)"""
# In principle, a comparison amounts to determining the sign of s-t.
# A full subtraction is relatively slow, however, so we first try to
# look at the components.
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
# Handle zeros and special numbers
if not sman or not tman:
if s == fzero: return -mpf_sign(t)
if t == fzero: return mpf_sign(s)
if s == t: return 0
# Follow same convention as Python's cmp for float nan
if t == fnan: return 1
if s == finf: return 1
if t == fninf: return 1
return -1
# Different sides of zero
if ssign != tsign:
if not ssign: return 1
return -1
# This reduces to direct integer comparison
if sexp == texp:
if sman == tman:
return 0
if sman > tman:
if ssign: return -1
else: return 1
else:
if ssign: return 1
else: return -1
# Check position of the highest set bit in each number. If
# different, there is certainly an inequality.
a = sbc + sexp
b = tbc + texp
if ssign:
if a < b: return 1
if a > b: return -1
else:
if a < b: return -1
if a > b: return 1
# Both numbers have the same highest bit. Subtract to find
# how the lower bits compare.
delta = mpf_sub(s, t, 5, round_floor)
if delta[0]:
return -1
return 1
def mpf_lt(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) < 0
def mpf_le(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) <= 0
def mpf_gt(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) > 0
def mpf_ge(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) >= 0
def mpf_min_max(seq):
min = max = seq[0]
for x in seq[1:]:
if mpf_lt(x, min): min = x
if mpf_gt(x, max): max = x
return min, max
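# Illustrative sketch (editor's example, not upstream code): comparisons
# follow the usual float conventions, with nan unordered:
#     >>> mpf_cmp(from_int(2), from_int(3))
#     -1
#     >>> mpf_lt(fnan, fzero), mpf_gt(fnan, fzero)
#     (False, False)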
def mpf_pos(s, prec=0, rnd=round_fast):
"""Calculate 0+s for a raw mpf (i.e., just round s to the specified
precision)."""
if prec:
sign, man, exp, bc = s
if (not man) and exp:
return s
return normalize1(sign, man, exp, bc, prec, rnd)
return s
def mpf_neg(s, prec=None, rnd=round_fast):
"""Negate a raw mpf (return -s), rounding the result to the
specified precision. The prec argument can be omitted to do the
operation exactly."""
sign, man, exp, bc = s
if not man:
if exp:
if s == finf: return fninf
if s == fninf: return finf
return s
if not prec:
return (1-sign, man, exp, bc)
return normalize1(1-sign, man, exp, bc, prec, rnd)
def mpf_abs(s, prec=None, rnd=round_fast):
"""Return abs(s) of the raw mpf s, rounded to the specified
precision. The prec argument can be omitted to generate an
exact result."""
sign, man, exp, bc = s
if (not man) and exp:
if s == fninf:
return finf
return s
if not prec:
if sign:
return (0, man, exp, bc)
return s
return normalize1(0, man, exp, bc, prec, rnd)
def mpf_sign(s):
"""Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on
whether s is negative, zero, or positive. (Nan is taken to give 0.)"""
sign, man, exp, bc = s
if not man:
if s == finf: return 1
if s == fninf: return -1
return 0
return (-1) ** sign
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
"""
Add the two raw mpf values s and t.
With prec=0, no rounding is performed. Note that this can
produce a very large mantissa (potentially too large to fit
in memory) if exponents are far apart.
"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
tsign ^= _sub
# Standard case: two nonzero, regular numbers
if sman and tman:
offset = sexp - texp
if offset:
if offset > 0:
# Outside precision range; only need to perturb
if offset > 100 and prec:
delta = sbc + sexp - tbc - texp
if delta > prec + 4:
offset = prec + 4
sman <<= offset
if tsign == ssign: sman += 1
else: sman -= 1
return normalize1(ssign, sman, sexp-offset,
bitcount(sman), prec, rnd)
# Add
if ssign == tsign:
man = tman + (sman << offset)
# Subtract
else:
if ssign: man = tman - (sman << offset)
else: man = (sman << offset) - tman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize1(ssign, man, texp, bc, prec or bc, rnd)
elif offset < 0:
# Outside precision range; only need to perturb
if offset < -100 and prec:
delta = tbc + texp - sbc - sexp
if delta > prec + 4:
offset = prec + 4
tman <<= offset
if ssign == tsign: tman += 1
else: tman -= 1
return normalize1(tsign, tman, texp-offset,
bitcount(tman), prec, rnd)
# Add
if ssign == tsign:
man = sman + (tman << -offset)
# Subtract
else:
if tsign: man = sman - (tman << -offset)
else: man = (tman << -offset) - sman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
# Equal exponents; no shifting necessary
if ssign == tsign:
man = tman + sman
else:
if ssign: man = tman - sman
else: man = sman - tman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize(ssign, man, texp, bc, prec or bc, rnd)
# Handle zeros and special numbers
if _sub:
t = mpf_neg(t)
if not sman:
if sexp:
if s == t or tman or not texp:
return s
return fnan
if tman:
return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
return t
if texp:
return t
if sman:
return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
return s
def mpf_sub(s, t, prec=0, rnd=round_fast):
"""Return the difference of two raw mpfs, s-t. This function is
simply a wrapper of mpf_add that changes the sign of t."""
return mpf_add(s, t, prec, rnd, 1)
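# Illustrative sketch (editor's example, not upstream code):
#     >>> to_float(mpf_add(fone, fhalf, 53))
#     1.5
#     >>> mpf_sub(fone, fone, 53) == fzero
#     True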
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
"""
Sum a list of mpf values efficiently and accurately
(typically no temporary roundoff occurs). If prec=0,
the final result will not be rounded either.
There may be roundoff error or cancellation if extremely
large exponent differences occur.
With absolute=True, sums the absolute values.
"""
man = 0
exp = 0
max_extra_prec = prec*2 or 1000000 # XXX
special = None
for x in xs:
xsign, xman, xexp, xbc = x
if xman:
if xsign and not absolute:
xman = -xman
delta = xexp - exp
if xexp >= exp:
# x much larger than existing sum?
# first: quick test
if (delta > max_extra_prec) and \
((not man) or delta-bitcount(abs(man)) > max_extra_prec):
man = xman
exp = xexp
else:
man += (xman << delta)
else:
delta = -delta
# x much smaller than existing sum?
if delta-xbc > max_extra_prec:
if not man:
man, exp = xman, xexp
else:
man = (man << delta) + xman
exp = xexp
elif xexp:
if absolute:
x = mpf_abs(x)
special = mpf_add(special or fzero, x, 1)
# Will be inf or nan
if special:
return special
return from_man_exp(man, exp, prec, rnd)
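# Illustrative sketch (editor's example, not upstream code): with prec=0
# the accumulated sum is exact:
#     >>> to_int(mpf_sum([from_int(n) for n in (1, 2, 3)]))
#     6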
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
"""Multiply two raw mpfs"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
sign = ssign ^ tsign
man = sman*tman
if man:
bc = bitcount(man)
if prec:
return normalize1(sign, man, sexp+texp, bc, prec, rnd)
else:
return (sign, man, sexp+texp, bc)
s_special = (not sman) and sexp
t_special = (not tman) and texp
if not s_special and not t_special:
return fzero
if fnan in (s, t): return fnan
if (not tman) and texp: s, t = t, s
if t == fzero: return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
"""Multiply by a Python integer."""
sign, man, exp, bc = s
if not man:
return mpf_mul(s, from_int(n), prec, rnd)
if not n:
return fzero
if n < 0:
sign ^= 1
n = -n
man *= n
return normalize(sign, man, exp, bitcount(man), prec, rnd)
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
"""Multiply two raw mpfs"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
sign = ssign ^ tsign
man = sman*tman
if man:
bc = sbc + tbc - 1
bc += int(man>>bc)
if prec:
return normalize1(sign, man, sexp+texp, bc, prec, rnd)
else:
return (sign, man, sexp+texp, bc)
s_special = (not sman) and sexp
t_special = (not tman) and texp
if not s_special and not t_special:
return fzero
if fnan in (s, t): return fnan
if (not tman) and texp: s, t = t, s
if t == fzero: return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
"""Multiply by a Python integer."""
sign, man, exp, bc = s
if not man:
return mpf_mul(s, from_int(n), prec, rnd)
if not n:
return fzero
if n < 0:
sign ^= 1
n = -n
man *= n
# Generally n will be small
if n < 1024:
bc += bctable[int(n)] - 1
else:
bc += bitcount(n) - 1
bc += int(man>>bc)
return normalize(sign, man, exp, bc, prec, rnd)
if BACKEND == 'gmpy':
mpf_mul = gmpy_mpf_mul
mpf_mul_int = gmpy_mpf_mul_int
else:
mpf_mul = python_mpf_mul
mpf_mul_int = python_mpf_mul_int
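# Illustrative sketch (editor's example, not upstream code): both backends
# expose the same mpf_mul/mpf_mul_int interface:
#     >>> to_float(mpf_mul(from_float(1.5), from_float(2.0), 53))
#     3.0
#     >>> to_int(mpf_mul_int(from_int(7), 6, 53))
#     42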
def mpf_shift(s, n):
"""Quickly multiply the raw mpf s by 2**n without rounding."""
sign, man, exp, bc = s
if not man:
return s
return sign, man, exp+n, bc
def mpf_frexp(x):
"""Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero"""
sign, man, exp, bc = x
if not man:
if x == fzero:
return (fzero, 0)
else:
raise ValueError
return mpf_shift(x, -bc-exp), bc+exp
def mpf_div(s, t, prec, rnd=round_fast):
"""Floating-point division"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
if not sman or not tman:
if s == fzero:
if t == fzero: raise ZeroDivisionError
if t == fnan: return fnan
return fzero
if t == fzero:
raise ZeroDivisionError
s_special = (not sman) and sexp
t_special = (not tman) and texp
if s_special and t_special:
return fnan
if s == fnan or t == fnan:
return fnan
if not t_special:
if t == fzero:
return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
return fzero
sign = ssign ^ tsign
if tman == 1:
return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
# Same strategy as for addition: if there is a remainder, perturb
# the result a few bits outside the precision range before rounding
extra = prec - sbc + tbc + 5
if extra < 5:
extra = 5
quot, rem = divmod(sman<<extra, tman)
if rem:
quot = (quot<<1) + 1
extra += 1
return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
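# Illustrative sketch (editor's example, not upstream code): inexact
# quotients are perturbed and then rounded; for 1/3 the default
# round_down happens to coincide with the nearest double:
#     >>> to_float(mpf_div(fone, from_int(3), 53))
#     0.3333333333333333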
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
"""Floating-point division n/t with a Python integer as numerator"""
sign, man, exp, bc = t
if not n or not man:
return mpf_div(from_int(n), t, prec, rnd)
if n < 0:
sign ^= 1
n = -n
extra = prec + bc + 5
quot, rem = divmod(n<<extra, man)
if rem:
quot = (quot<<1) + 1
extra += 1
return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
def mpf_mod(s, t, prec, rnd=round_fast):
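    """Compute the remainder s % t for raw mpfs, following Python's %
    semantics (the result takes the sign of t)."""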
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
if ((not sman) and sexp) or ((not tman) and texp):
return fnan
# Important special case: do nothing if t is larger
if ssign == tsign and texp > sexp+sbc:
return s
# Another important special case: this allows us to do e.g. x % 1.0
# to find the fractional part of x, and it will work when x is huge.
if tman == 1 and sexp > texp+tbc:
return fzero
base = min(sexp, texp)
sman = (-1)**ssign * sman
tman = (-1)**tsign * tman
man = (sman << (sexp-base)) % (tman << (texp-base))
if man >= 0:
sign = 0
else:
man = -man
sign = 1
return normalize(sign, man, base, bitcount(man), prec, rnd)
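# Illustrative sketch (editor's example, not upstream code):
#     >>> to_float(mpf_mod(from_int(10), from_int(3), 53))
#     1.0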
reciprocal_rnd = {
round_down : round_up,
round_up : round_down,
round_floor : round_ceiling,
round_ceiling : round_floor,
round_nearest : round_nearest
}
negative_rnd = {
round_down : round_down,
round_up : round_up,
round_floor : round_ceiling,
round_ceiling : round_floor,
round_nearest : round_nearest
}
def mpf_pow_int(s, n, prec, rnd=round_fast):
"""Compute s**n, where s is a raw mpf and n is a Python integer."""
sign, man, exp, bc = s
if (not man) and exp:
if s == finf:
if n > 0: return s
if n == 0: return fnan
return fzero
if s == fninf:
if n > 0: return [finf, fninf][n & 1]
if n == 0: return fnan
return fzero
return fnan
n = int(n)
if n == 0: return fone
if n == 1: return mpf_pos(s, prec, rnd)
if n == 2:
_, man, exp, bc = s
if not man:
return fzero
man = man*man
if man == 1:
return (0, MPZ_ONE, exp+exp, 1)
bc = bc + bc - 2
bc += bctable[int(man>>bc)]
return normalize1(0, man, exp+exp, bc, prec, rnd)
if n == -1: return mpf_div(fone, s, prec, rnd)
if n < 0:
inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
return mpf_div(fone, inverse, prec, rnd)
result_sign = sign & n
# Use exact integer power when the exact mantissa is small
if man == 1:
return (result_sign, MPZ_ONE, exp*n, 1)
if bc*n < 1000:
man **= n
return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)
# Use directed rounding all the way through to maintain rigorous
# bounds for interval arithmetic
rounds_down = (rnd == round_nearest) or \
shifts_down[rnd][result_sign]
# Now we perform binary exponentiation. Need to estimate precision
# to avoid rounding errors from temporary operations. Roughly log_2(n)
# operations are performed.
workprec = prec + 4*bitcount(n) + 4
_, pm, pe, pbc = fone
while 1:
if n & 1:
pm = pm*man
pe = pe+exp
pbc += bc - 2
pbc = pbc + bctable[int(pm >> pbc)]
if pbc > workprec:
if rounds_down:
pm = pm >> (pbc-workprec)
else:
pm = -((-pm) >> (pbc-workprec))
pe += pbc - workprec
pbc = workprec
n -= 1
if not n:
break
man = man*man
exp = exp+exp
bc = bc + bc - 2
bc = bc + bctable[int(man >> bc)]
if bc > workprec:
if rounds_down:
man = man >> (bc-workprec)
else:
man = -((-man) >> (bc-workprec))
exp += bc - workprec
bc = workprec
n = n // 2
return normalize(result_sign, pm, pe, pbc, prec, rnd)
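# Illustrative sketch (editor's example, not upstream code): small exact
# powers take the fast path (bc*n < 1000) and stay exact:
#     >>> to_int(mpf_pow_int(from_int(3), 4, 53))
#     81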
def mpf_perturb(x, eps_sign, prec, rnd):
"""
    For nonzero x, calculate x + eps with directed rounding, where
    eps has magnitude roughly 2**(-prec) relative to x and the given
    sign (0 for positive, 1 for negative).
With rounding to nearest, this is taken to simply normalize
x to the given precision.
"""
if rnd == round_nearest:
return mpf_pos(x, prec, rnd)
sign, man, exp, bc = x
eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1)
if sign:
away = (rnd in (round_down, round_ceiling)) ^ eps_sign
else:
away = (rnd in (round_up, round_ceiling)) ^ eps_sign
if away:
return mpf_add(x, eps, prec, rnd)
else:
return mpf_pos(x, prec, rnd)
#----------------------------------------------------------------------------#
# Radix conversion #
#----------------------------------------------------------------------------#
def to_digits_exp(s, dps):
"""Helper function for representing the floating-point number s as
a decimal with dps digits. Returns (sign, string, exponent) where
sign is '' or '-', string is the digit string, and exponent is
the decimal exponent as an int.
If inexact, the decimal representation is rounded toward zero."""
# Extract sign first so it doesn't mess up the string digit count
if s[0]:
sign = '-'
s = mpf_neg(s)
else:
sign = ''
_sign, man, exp, bc = s
if not man:
return '', '0', 0
bitprec = int(dps * math.log(10,2)) + 10
# Cut down to size
# TODO: account for precision when doing this
exp_from_1 = exp + bc
if abs(exp_from_1) > 3500:
from .libelefun import mpf_ln2, mpf_ln10
# Set b = int(exp * log(2)/log(10))
# If exp is huge, we must use high-precision arithmetic to
# find the nearest power of ten
expprec = bitcount(abs(exp)) + 5
tmp = from_int(exp)
tmp = mpf_mul(tmp, mpf_ln2(expprec))
tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
b = to_int(tmp)
s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
_sign, man, exp, bc = s
exponent = b
else:
exponent = 0
# First, calculate mantissa digits by converting to a binary
# fixed-point number and then converting that number to
# a decimal fixed-point number.
fixprec = max(bitprec - exp - bc, 0)
fixdps = int(fixprec / math.log(10,2) + 0.5)
sf = to_fixed(s, fixprec)
sd = bin_to_radix(sf, fixprec, 10, fixdps)
digits = numeral(sd, base=10, size=dps)
exponent += len(digits) - fixdps - 1
return sign, digits, exponent
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
show_zero_exponent=False):
"""
Convert a raw mpf to a decimal floating-point literal with at
most `dps` decimal digits in the mantissa (not counting extra zeros
that may be inserted for visual purposes).
The number will be printed in fixed-point format if the position
of the leading digit is strictly between min_fixed
(default = min(-dps/3,-5)) and max_fixed (default = dps).
To force fixed-point format always, set min_fixed = -inf,
max_fixed = +inf. To force floating-point format, set
min_fixed >= max_fixed.
The literal is formatted so that it can be parsed back to a number
by to_str, float() or Decimal().
"""
# Special numbers
if not s[1]:
if s == fzero:
if dps: t = '0.0'
else: t = '.0'
if show_zero_exponent:
t += 'e+0'
return t
if s == finf: return '+inf'
if s == fninf: return '-inf'
if s == fnan: return 'nan'
raise ValueError
if min_fixed is None: min_fixed = min(-(dps//3), -5)
if max_fixed is None: max_fixed = dps
# to_digits_exp rounds to floor.
# This sometimes kills some instances of "...00001"
sign, digits, exponent = to_digits_exp(s, dps+3)
# No digits: show only .0; round exponent to nearest
if not dps:
if digits[0] in '56789':
exponent += 1
digits = ".0"
else:
# Rounding up kills some instances of "...99999"
if len(digits) > dps and digits[dps] in '56789' and \
(dps < 500 or digits[dps-4:dps] == '9999'):
digits2 = str(int(digits[:dps]) + 1)
if len(digits2) > dps:
digits2 = digits2[:dps]
exponent += 1
digits = digits2
else:
digits = digits[:dps]
# Prettify numbers close to unit magnitude
if min_fixed < exponent < max_fixed:
if exponent < 0:
digits = ("0"*int(-exponent)) + digits
split = 1
else:
split = exponent + 1
if split > dps:
digits += "0"*(split-dps)
exponent = 0
else:
split = 1
digits = (digits[:split] + "." + digits[split:])
if strip_zeros:
# Clean up trailing zeros
digits = digits.rstrip('0')
if digits[-1] == ".":
digits += "0"
if exponent == 0 and dps and not show_zero_exponent: return sign + digits
if exponent >= 0: return sign + digits + "e+" + str(exponent)
if exponent < 0: return sign + digits + "e" + str(exponent)
def str_to_man_exp(x, base=10):
"""Helper function for from_str."""
# Verify that the input is a valid float literal
float(x)
# Split into mantissa, exponent
x = x.lower()
parts = x.split('e')
if len(parts) == 1:
exp = 0
else: # == 2
x = parts[0]
exp = int(parts[1])
# Look for radix point in mantissa
parts = x.split('.')
if len(parts) == 2:
a, b = parts[0], parts[1].rstrip('0')
exp -= len(b)
x = a + b
x = MPZ(int(x, base))
return x, exp
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}
def from_str(x, prec, rnd=round_fast):
"""Create a raw mpf from a decimal literal, rounding in the
specified direction if the input number cannot be represented
exactly as a binary floating-point number with the given number of
bits. The literal syntax accepted is the same as for Python
floats.
TODO: the rounding does not work properly for large exponents.
"""
x = x.strip()
if x in special_str:
return special_str[x]
if '/' in x:
p, q = x.split('/')
return from_rational(int(p), int(q), prec, rnd)
man, exp = str_to_man_exp(x, base=10)
# XXX: appropriate cutoffs & track direction
# note no factors of 5
if abs(exp) > 400:
s = from_int(man, prec+10)
s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
else:
if exp >= 0:
s = from_int(man * 10**exp, prec, rnd)
else:
s = from_rational(man, 10**-exp, prec, rnd)
return s
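# Illustrative sketch (editor's example, not upstream code): decimal
# string conversion round-trips for exactly representable values:
#     >>> to_str(from_float(0.25), 10)
#     '0.25'
#     >>> from_str('0.25', 53) == from_float(0.25)
#     True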
# Binary string conversion. These are currently mainly used for debugging
# and could use some improvement in the future
def from_bstr(x):
man, exp = str_to_man_exp(x, base=2)
man = MPZ(man)
sign = 0
if man < 0:
man = -man
sign = 1
bc = bitcount(man)
return normalize(sign, man, exp, bc, bc, round_floor)
def to_bstr(x):
sign, man, exp, bc = x
return ['','-'][sign] + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp)
#----------------------------------------------------------------------------#
# Square roots #
#----------------------------------------------------------------------------#
def mpf_sqrt(s, prec, rnd=round_fast):
"""
Compute the square root of a nonnegative mpf value. The
result is correctly rounded.
"""
sign, man, exp, bc = s
if sign:
raise ComplexResult("square root of a negative number")
if not man:
return s
if exp & 1:
exp -= 1
man <<= 1
bc += 1
elif man == 1:
return normalize1(sign, man, exp//2, bc, prec, rnd)
shift = max(4, 2*prec-bc+4)
shift += shift & 1
if rnd in 'fd':
man = isqrt(man<<shift)
else:
man, rem = sqrtrem(man<<shift)
# Perturb up
if rem:
man = (man<<1)+1
shift += 2
return from_man_exp(man, (exp-shift)//2, prec, rnd)
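# Illustrative sketch (editor's example, not upstream code): perfect
# squares come out exact; a negative input raises ComplexResult:
#     >>> to_int(mpf_sqrt(from_int(16), 53))
#     4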
def mpf_hypot(x, y, prec, rnd=round_fast):
"""Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
x and y."""
if y == fzero: return mpf_abs(x, prec, rnd)
if x == fzero: return mpf_abs(y, prec, rnd)
hypot2 = mpf_add(mpf_mul(x,x), mpf_mul(y,y), prec+4)
return mpf_sqrt(hypot2, prec, rnd)
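# Illustrative sketch (editor's example, not upstream code): the classic
# 3-4-5 triangle is computed exactly:
#     >>> to_float(mpf_hypot(from_int(3), from_int(4), 53))
#     5.0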
if BACKEND == 'sage':
try:
import sage.libs.mpmath.ext_libmp as ext_lib
mpf_add = ext_lib.mpf_add
mpf_sub = ext_lib.mpf_sub
mpf_mul = ext_lib.mpf_mul
mpf_div = ext_lib.mpf_div
mpf_sqrt = ext_lib.mpf_sqrt
except ImportError:
pass
|
40223137/w17w17
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testcallable.py
|
739
|
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import unittest
from unittest.test.testmock.support import is_instance, X, SomeClass
from unittest.mock import (
Mock, MagicMock, NonCallableMagicMock,
NonCallableMock, patch, create_autospec,
CallableMixin
)
class TestCallable(unittest.TestCase):
def assertNotCallable(self, mock):
self.assertTrue(is_instance(mock, NonCallableMagicMock))
self.assertFalse(is_instance(mock, CallableMixin))
def test_non_callable(self):
for mock in NonCallableMagicMock(), NonCallableMock():
self.assertRaises(TypeError, mock)
self.assertFalse(hasattr(mock, '__call__'))
self.assertIn(mock.__class__.__name__, repr(mock))
    def test_hierarchy(self):
self.assertTrue(issubclass(MagicMock, Mock))
self.assertTrue(issubclass(NonCallableMagicMock, NonCallableMock))
def test_attributes(self):
one = NonCallableMock()
self.assertTrue(issubclass(type(one.one), Mock))
two = NonCallableMagicMock()
self.assertTrue(issubclass(type(two.two), MagicMock))
def test_subclasses(self):
class MockSub(Mock):
pass
one = MockSub()
self.assertTrue(issubclass(type(one.one), MockSub))
class MagicSub(MagicMock):
pass
two = MagicSub()
self.assertTrue(issubclass(type(two.two), MagicSub))
def test_patch_spec(self):
patcher = patch('%s.X' % __name__, spec=True)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertNotCallable(instance)
self.assertRaises(TypeError, instance)
def test_patch_spec_set(self):
patcher = patch('%s.X' % __name__, spec_set=True)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertNotCallable(instance)
self.assertRaises(TypeError, instance)
def test_patch_spec_instance(self):
patcher = patch('%s.X' % __name__, spec=X())
mock = patcher.start()
self.addCleanup(patcher.stop)
self.assertNotCallable(mock)
self.assertRaises(TypeError, mock)
def test_patch_spec_set_instance(self):
patcher = patch('%s.X' % __name__, spec_set=X())
mock = patcher.start()
self.addCleanup(patcher.stop)
self.assertNotCallable(mock)
self.assertRaises(TypeError, mock)
def test_patch_spec_callable_class(self):
class CallableX(X):
def __call__(self):
pass
class Sub(CallableX):
pass
class Multi(SomeClass, Sub):
pass
for arg in 'spec', 'spec_set':
for Klass in CallableX, Sub, Multi:
with patch('%s.X' % __name__, **{arg: Klass}) as mock:
instance = mock()
mock.assert_called_once_with()
self.assertTrue(is_instance(instance, MagicMock))
# inherited spec
self.assertRaises(AttributeError, getattr, instance,
'foobarbaz')
result = instance()
# instance is callable, result has no spec
instance.assert_called_once_with()
result(3, 2, 1)
result.assert_called_once_with(3, 2, 1)
result.foo(3, 2, 1)
result.foo.assert_called_once_with(3, 2, 1)
    def test_create_autospec(self):
mock = create_autospec(X)
instance = mock()
self.assertRaises(TypeError, instance)
mock = create_autospec(X())
self.assertRaises(TypeError, mock)
def test_create_autospec_instance(self):
mock = create_autospec(SomeClass, instance=True)
self.assertRaises(TypeError, mock)
mock.wibble()
mock.wibble.assert_called_once_with()
self.assertRaises(TypeError, mock.wibble, 'some', 'args')
|
pqtoan/mathics
|
refs/heads/larion/quotient
|
mathics/builtin/arithmetic.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Arithmetic functions
Basic arithmetic functions, including complex number arithmetic.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
import sympy
import mpmath
from mathics.builtin.base import (
Builtin, Predefined, BinaryOperator, PrefixOperator, PostfixOperator, Test,
SympyFunction, SympyConstant)
from mathics.core.expression import (
Expression, Number, Integer, Rational, Real, Symbol, Complex, String)
from mathics.core.numbers import (
min_prec, dps, SpecialValueError)
from mathics.builtin.lists import _IterationFunction
from mathics.core.convert import from_sympy
class _MPMathFunction(SympyFunction):
attributes = ('Listable', 'NumericFunction')
mpmath_name = None
nargs = 1
def get_mpmath_function(self, args):
if self.mpmath_name is None or len(args) != self.nargs:
return None
return getattr(mpmath, self.mpmath_name)
def apply(self, z, evaluation):
'%(name)s[z__]'
args = z.numerify(evaluation).get_sequence()
mpmath_function = self.get_mpmath_function(args)
result = None
# if no arguments are inexact attempt to use sympy
if all(not x.is_inexact() for x in args):
result = Expression(self.get_name(), *args).to_sympy()
result = self.prepare_mathics(result)
result = from_sympy(result)
# evaluate leaves to convert e.g. Plus[2, I] -> Complex[2, 1]
return result.evaluate_leaves(evaluation)
elif mpmath_function is None:
return
if not all(isinstance(arg, Number) for arg in args):
return
if any(arg.is_machine_precision() for arg in args):
# if any argument has machine precision then the entire calculation
# is done with machine precision.
float_args = [arg.round().get_float_value(permit_complex=True) for arg in args]
if None in float_args:
return
result = self.call_mpmath(mpmath_function, float_args)
if isinstance(result, (mpmath.mpc, mpmath.mpf)):
if mpmath.isinf(result) and isinstance(result, mpmath.mpc):
result = Symbol('ComplexInfinity')
elif mpmath.isinf(result) and result > 0:
result = Expression('DirectedInfinity', Integer(1))
elif mpmath.isinf(result) and result < 0:
result = Expression('DirectedInfinity', Integer(-1))
elif mpmath.isnan(result):
result = Symbol('Indeterminate')
else:
result = Number.from_mpmath(result)
else:
prec = min_prec(*args)
d = dps(prec)
args = [Expression('N', arg, Integer(d)).evaluate(evaluation) for arg in args]
with mpmath.workprec(prec):
mpmath_args = [x.to_mpmath() for x in args]
if None in mpmath_args:
return
result = self.call_mpmath(mpmath_function, mpmath_args)
if isinstance(result, (mpmath.mpc, mpmath.mpf)):
result = Number.from_mpmath(result, d)
return result
def call_mpmath(self, mpmath_function, mpmath_args):
try:
return mpmath_function(*mpmath_args)
except ValueError as exc:
text = str(exc)
if text == 'gamma function pole':
return Symbol('ComplexInfinity')
else:
raise
except ZeroDivisionError:
return
except SpecialValueError as exc:
return Symbol(exc.name)
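# Illustrative sketch (editor's example, not part of this module): a
# builtin wired to mpmath needs little more than mpmath_name; the class
# below is hypothetical but follows the pattern used by the real
# builtins defined further down in this file.
#
# class Cosh(_MPMathFunction):
#     mpmath_name = 'cosh'   # N[Cosh[1.]] then evaluates via mpmath.cosh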
class _MPMathMultiFunction(_MPMathFunction):
sympy_names = None
mpmath_names = None
def get_sympy_names(self):
if self.sympy_names is None:
return [self.sympy_name]
return self.sympy_names.values()
def get_function(self, module, names, fallback_name, leaves):
try:
name = fallback_name
if names is not None:
name = names[len(leaves)]
return getattr(module, name)
except KeyError:
return None
def get_sympy_function(self, leaves):
return self.get_function(
sympy, self.sympy_names, self.sympy_name, leaves)
def get_mpmath_function(self, leaves):
return self.get_function(
mpmath, self.mpmath_names, self.mpmath_name, leaves)
class Plus(BinaryOperator, SympyFunction):
"""
<dl>
<dt>'Plus[$a$, $b$, ...]'</dt>
<dt>$a$ + $b$ + ...</dt>
<dd>represents the sum of the terms $a$, $b$, ...
</dl>
>> 1 + 2
= 3
'Plus' performs basic simplification of terms:
>> a + b + a
= 2 a + b
>> a + a + 3 * a
= 5 a
>> a + b + 4.5 + a + b + a + 2 + 1.5 b
= 6.5 + 3 a + 3.5 b
Apply 'Plus' on a list to sum up its elements:
>> Plus @@ {2, 4, 6}
= 12
The sum of the first 1000 integers:
>> Plus @@ Range[1000]
= 500500
'Plus' has default value 0:
>> DefaultValues[Plus]
= {HoldPattern[Default[Plus]] :> 0}
>> a /. n_. + x_ :> {n, x}
= {0, a}
The sum of 2 red circles and 3 red circles is...
>> 2 Graphics[{Red,Disk[]}] + 3 Graphics[{Red,Disk[]}]
= 5 -Graphics-
#> -2a - 2b
= -2 a - 2 b
#> -4+2x+2*Sqrt[3]
= -4 + 2 Sqrt[3] + 2 x
#> 2a-3b-c
= 2 a - 3 b - c
#> 2a+5d-3b-2c-e
= 2 a - 3 b - 2 c + 5 d - e
#> 1 - I * Sqrt[3]
= 1 - I Sqrt[3]
#> Head[3 + 2 I]
= Complex
#> N[Pi, 30] + N[E, 30]
= 5.85987448204883847382293085463
#> % // Precision
= 30.
"""
operator = '+'
precedence = 310
attributes = ('Flat', 'Listable', 'NumericFunction',
'OneIdentity', 'Orderless', 'Protected')
default_formats = False
defaults = {
None: '0',
}
sympy_name = 'Add'
def format_plus(self, items, evaluation):
'Plus[items__]'
def negate(item):
if item.has_form('Times', 1, None):
if isinstance(item.leaves[0], Number):
neg = -item.leaves[0]
if neg.same(Integer(1)):
if len(item.leaves) == 1:
return neg
else:
return Expression('Times', *item.leaves[1:])
else:
return Expression('Times', neg, *item.leaves[1:])
else:
return Expression('Times', -1, *item.leaves)
elif isinstance(item, Number):
return -item.to_sympy()
else:
return Expression('Times', -1, item)
def is_negative(value):
if isinstance(value, Complex):
real, imag = value.to_sympy().as_real_imag()
if real <= 0 and imag <= 0:
return True
elif isinstance(value, Number) and value.to_sympy() < 0:
return True
return False
items = items.get_sequence()
values = [Expression('HoldForm', item) for item in items[:1]]
ops = []
for item in items[1:]:
if (item.has_form('Times', 1, None) and is_negative(item.leaves[0])) or is_negative(item):
item = negate(item)
op = "-"
else:
op = "+"
values.append(Expression('HoldForm', item))
ops.append(String(op))
return Expression('Infix', Expression('List', *values),
Expression('List', *ops), 310, Symbol('Left'))
def apply(self, items, evaluation):
'Plus[items___]'
items = items.numerify(evaluation).get_sequence()
leaves = []
last_item = last_count = None
prec = min_prec(*items)
is_machine_precision = any(item.is_machine_precision() for item in items)
numbers = []
def append_last():
if last_item is not None:
if last_count == 1:
leaves.append(last_item)
else:
if last_item.has_form('Times', None):
last_item.leaves.insert(0, from_sympy(last_count))
leaves.append(last_item)
else:
leaves.append(Expression(
'Times', from_sympy(last_count), last_item))
for item in items:
if isinstance(item, Number):
numbers.append(item)
else:
count = rest = None
if item.has_form('Times', None):
for leaf in item.leaves:
if isinstance(leaf, Number):
count = leaf.to_sympy()
rest = item.leaves[:]
rest.remove(leaf)
if len(rest) == 1:
rest = rest[0]
else:
rest.sort()
rest = Expression('Times', *rest)
break
if count is None:
count = sympy.Integer(1)
rest = item
if last_item is not None and last_item == rest:
last_count = last_count + count
else:
append_last()
last_item = rest
last_count = count
append_last()
if numbers:
if prec is not None:
if is_machine_precision:
numbers = [item.to_mpmath() for item in numbers]
number = mpmath.fsum(numbers)
number = Number.from_mpmath(number)
else:
with mpmath.workprec(prec):
numbers = [item.to_mpmath() for item in numbers]
number = mpmath.fsum(numbers)
number = Number.from_mpmath(number, dps(prec))
else:
number = from_sympy(sum(item.to_sympy() for item in numbers))
else:
number = Integer(0)
if not number.same(Integer(0)):
leaves.insert(0, number)
if not leaves:
return Integer(0)
elif len(leaves) == 1:
return leaves[0]
else:
leaves.sort()
return Expression('Plus', *leaves)
class Subtract(BinaryOperator):
"""
<dl>
<dt>'Subtract[$a$, $b$]'</dt>
<dt>$a$ - $b$</dt>
<dd>represents the subtraction of $b$ from $a$.</dd>
</dl>
>> 5 - 3
= 2
>> a - b // FullForm
= Plus[a, Times[-1, b]]
>> a - b - c
= a - b - c
>> a - (b - c)
= a - b + c
"""
operator = '-'
precedence_parse = 311
precedence = 310
attributes = ('Listable', 'NumericFunction')
grouping = 'Left'
rules = {
'Subtract[x_, y_]': 'Plus[x, Times[-1, y]]',
}
class Minus(PrefixOperator):
"""
<dl>
<dt>'Minus[$expr$]'
<dd> is the negation of $expr$.
</dl>
>> -a //FullForm
= Times[-1, a]
'Minus' automatically distributes:
>> -(x - 2/3)
= 2 / 3 - x
'Minus' threads over lists:
>> -Range[10]
= {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}
"""
operator = '-'
precedence = 480
attributes = ('Listable', 'NumericFunction')
rules = {
'Minus[x_]': 'Times[-1, x]',
}
formats = {
'Minus[x_]': 'Prefix[{HoldForm[x]}, "-", 480]',
# don't put e.g. -2/3 in parentheses
'Minus[expr_Divide]': 'Prefix[{HoldForm[expr]}, "-", 399]',
'Minus[Infix[expr_, op_, 400, grouping_]]': (
'Prefix[{Infix[expr, op, 400, grouping]}, "-", 399]'),
}
def apply_int(self, x, evaluation):
'Minus[x_Integer]'
return Integer(-x.to_sympy())
def create_infix(items, operator, prec, grouping):
if len(items) == 1:
return items[0]
else:
return Expression('Infix', Expression('List', *items),
String(operator), prec, Symbol(grouping))
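# Illustrative sketch (editor's note): create_infix(values, '+', 310,
# 'Left') wraps multiple operands as Infix[{...}, "+", 310, Left], which
# the formatter renders as "a + b + ..."; a single operand is returned
# unchanged.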
class Times(BinaryOperator, SympyFunction):
"""
<dl>
<dt>'Times[$a$, $b$, ...]'</dt>
<dt>'$a$ * $b$ * ...'</dt>
<dt>'$a$ $b$ ...'</dt>
<dd>represents the product of the terms $a$, $b$, ...
</dl>
>> 10 * 2
= 20
>> 10 2
= 20
>> a * a
= a ^ 2
>> x ^ 10 * x ^ -2
= x ^ 8
>> {1, 2, 3} * 4
= {4, 8, 12}
>> Times @@ {1, 2, 3, 4}
= 24
>> IntegerLength[Times@@Range[5000]]
= 16326
'Times' has default value 1:
>> DefaultValues[Times]
= {HoldPattern[Default[Times]] :> 1}
>> a /. n_. * x_ :> {n, x}
= {1, a}
#> -a*b // FullForm
= Times[-1, a, b]
#> -(x - 2/3)
= 2 / 3 - x
#> -x*2
= -2 x
#> -(h/2) // FullForm
= Times[Rational[-1, 2], h]
#> x / x
= 1
#> 2x^2 / x^2
= 2
#> 3. Pi
= 9.42478
#> Head[3 * I]
= Complex
#> Head[Times[I, 1/2]]
= Complex
#> Head[Pi * I]
= Times
#> 3 * a //InputForm
= 3*a
#> 3 * a //OutputForm
= 3 a
#> -2.123456789 x
= -2.12346 x
#> -2.123456789 I
= 0. - 2.12346 I
#> N[Pi, 30] * I
= 3.14159265358979323846264338328 I
#> N[I Pi, 30]
= 3.14159265358979323846264338328 I
#> N[Pi * E, 30]
= 8.53973422267356706546355086955
#> N[Pi, 30] * N[E, 30]
= 8.53973422267356706546355086955
#> N[Pi, 30] * E
= 8.53973422267356706546355086955
#> % // Precision
= 30.
"""
operator = '*'
operator_display = ' '
precedence = 400
attributes = ('Flat', 'Listable', 'NumericFunction',
'OneIdentity', 'Orderless', 'Protected')
defaults = {
None: '1',
}
default_formats = False
sympy_name = 'Mul'
rules = {
}
formats = {
}
def format_times(self, items, evaluation, op='\u2062'):
'Times[items__]'
def inverse(item):
if item.has_form('Power', 2) and isinstance( # noqa
item.leaves[1], (Integer, Rational, Real)):
neg = -item.leaves[1]
if neg.same(Integer(1)):
return item.leaves[0]
else:
return Expression('Power', item.leaves[0], neg)
else:
return item
items = items.get_sequence()
positive = []
negative = []
for item in items:
if (item.has_form('Power', 2) and
isinstance(item.leaves[1], (Integer, Rational, Real)) and
item.leaves[1].to_sympy() < 0): # nopep8
negative.append(inverse(item))
elif isinstance(item, Rational):
numerator = item.numerator()
if not numerator.same(Integer(1)):
positive.append(numerator)
negative.append(item.denominator())
else:
positive.append(item)
if positive and positive[0].get_int_value() == -1:
del positive[0]
minus = True
else:
minus = False
positive = [Expression('HoldForm', item) for item in positive]
negative = [Expression('HoldForm', item) for item in negative]
if positive:
positive = create_infix(positive, op, 400, 'None')
else:
positive = Integer(1)
if negative:
negative = create_infix(negative, op, 400, 'None')
result = Expression('Divide', Expression(
'HoldForm', positive), Expression('HoldForm', negative))
else:
result = positive
if minus:
result = Expression(
'Minus', result) # Expression('PrecedenceForm', result, 481))
result = Expression('HoldForm', result)
return result
def format_inputform(self, items, evaluation):
'InputForm: Times[items__]'
return self.format_times(items, evaluation, op='*')
def format_standardform(self, items, evaluation):
'StandardForm: Times[items__]'
return self.format_times(items, evaluation, op=' ')
def format_outputform(self, items, evaluation):
'OutputForm: Times[items__]'
return self.format_times(items, evaluation, op=' ')
def apply(self, items, evaluation):
'Times[items___]'
items = items.numerify(evaluation).get_sequence()
leaves = []
numbers = []
prec = min_prec(*items)
is_machine_precision = any(item.is_machine_precision() for item in items)
# find numbers and simplify Times -> Power
for item in items:
if isinstance(item, Number):
numbers.append(item)
elif leaves and item == leaves[-1]:
leaves[-1] = Expression('Power', leaves[-1], Integer(2))
elif (leaves and item.has_form('Power', 2) and
leaves[-1].has_form('Power', 2) and
item.leaves[0].same(leaves[-1].leaves[0])):
leaves[-1].leaves[1] = Expression(
'Plus', item.leaves[1], leaves[-1].leaves[1])
elif (leaves and item.has_form('Power', 2) and
item.leaves[0].same(leaves[-1])):
leaves[-1] = Expression(
'Power', leaves[-1],
Expression('Plus', item.leaves[1], Integer(1)))
elif (leaves and leaves[-1].has_form('Power', 2) and
leaves[-1].leaves[0].same(item)):
leaves[-1] = Expression('Power', item, Expression(
'Plus', Integer(1), leaves[-1].leaves[1]))
else:
leaves.append(item)
if numbers:
if prec is not None:
if is_machine_precision:
numbers = [item.to_mpmath() for item in numbers]
number = mpmath.fprod(numbers)
number = Number.from_mpmath(number)
else:
with mpmath.workprec(prec):
numbers = [item.to_mpmath() for item in numbers]
number = mpmath.fprod(numbers)
number = Number.from_mpmath(number, dps(prec))
else:
number = sympy.Mul(*[item.to_sympy() for item in numbers])
number = from_sympy(number)
else:
number = Integer(1)
if number.same(Integer(1)):
number = None
elif number.is_zero:
return number
elif number.same(Integer(-1)) and leaves and leaves[0].has_form('Plus', None):
leaves[0].leaves = [Expression('Times', Integer(-1), leaf)
for leaf in leaves[0].leaves]
number = None
for leaf in leaves:
leaf.last_evaluated = None
if number is not None:
leaves.insert(0, number)
if not leaves:
return Integer(1)
elif len(leaves) == 1:
return leaves[0]
else:
return Expression('Times', *leaves)
class Divide(BinaryOperator):
"""
<dl>
<dt>'Divide[$a$, $b$]'</dt>
<dt>'$a$ / $b$'</dt>
<dd>represents the division of $a$ by $b$.
</dl>
>> 30 / 5
= 6
>> 1 / 8
= 1 / 8
>> Pi / 4
= Pi / 4
Use 'N' or a decimal point to force numeric evaluation:
>> Pi / 4.0
= 0.785398
>> 1 / 8
= 1 / 8
>> N[%]
= 0.125
Nested divisions:
>> a / b / c
= a / (b c)
>> a / (b / c)
= a c / b
>> a / b / (c / (d / e))
= a d / (b c e)
>> a / (b ^ 2 * c ^ 3 / e)
= a e / (b ^ 2 c ^ 3)
#> 1 / 4.0
= 0.25
#> 10 / 3 // FullForm
= Rational[10, 3]
#> a / b // FullForm
= Times[a, Power[b, -1]]
"""
operator = '/'
precedence = 470
attributes = ('Listable', 'NumericFunction')
grouping = 'Left'
default_formats = False
rules = {
'Divide[x_, y_]': 'Times[x, Power[y, -1]]',
'MakeBoxes[Divide[x_, y_], f:StandardForm|TraditionalForm]': (
'FractionBox[MakeBoxes[x, f], MakeBoxes[y, f]]'),
}
formats = {
(('InputForm', 'OutputForm'), 'Divide[x_, y_]'): (
'Infix[{HoldForm[x], HoldForm[y]}, "/", 400, Left]'),
}
class Power(BinaryOperator, _MPMathFunction):
"""
<dl>
<dt>'Power[$a$, $b$]'</dt>
<dt>'$a$ ^ $b$'</dt>
<dd>represents $a$ raised to the power of $b$.
</dl>
>> 4 ^ (1/2)
= 2
>> 4 ^ (1/3)
= 2 ^ (2 / 3)
>> 3^123
= 48519278097689642681155855396759336072749841943521979872827
>> (y ^ 2) ^ (1/2)
= Sqrt[y ^ 2]
>> (y ^ 2) ^ 3
= y ^ 6
>> Plot[Evaluate[Table[x^y, {y, 1, 5}]], {x, -1.5, 1.5}, AspectRatio -> 1]
= -Graphics-
Use a decimal point to force numeric evaluation:
>> 4.0 ^ (1/3)
= 1.5874
'Power' has default value 1 for its second argument:
>> DefaultValues[Power]
= {HoldPattern[Default[Power, 2]] :> 1}
>> a /. x_ ^ n_. :> {x, n}
= {a, 1}
'Power' can be used with complex numbers:
>> (1.5 + 1.0 I) ^ 3.5
= -3.68294 + 6.95139 I
>> (1.5 + 1.0 I) ^ (3.5 + 1.5 I)
= -3.19182 + 0.645659 I
#> 1/0
: Infinite expression 1 / 0 encountered.
= ComplexInfinity
#> 0 ^ -2
: Infinite expression 1 / 0 ^ 2 encountered.
= ComplexInfinity
#> 0 ^ (-1/2)
: Infinite expression 1 / Sqrt[0] encountered.
= ComplexInfinity
#> 0 ^ -Pi
: Infinite expression 1 / 0 ^ 3.14159 encountered.
= ComplexInfinity
#> 0 ^ (2 I E)
: Indeterminate expression 0 ^ (0. + 5.43656 I) encountered.
= Indeterminate
#> 0 ^ - (Pi + 2 E I)
: Infinite expression 0 ^ (-3.14159 - 5.43656 I) encountered.
= ComplexInfinity
#> 0 ^ 0
: Indeterminate expression 0 ^ 0 encountered.
= Indeterminate
#> Sqrt[-3+2. I]
= 0.550251 + 1.81735 I
#> Sqrt[-3+2 I]
= Sqrt[-3 + 2 I]
#> (3/2+1/2I)^2
= 2 + 3 I / 2
#> I ^ I
= I ^ I
#> 2 ^ 2.0
= 4.
#> Pi ^ 4.
= 97.4091
#> a ^ b
= a ^ b
"""
operator = '^'
precedence = 590
attributes = ('Listable', 'NumericFunction', 'OneIdentity')
grouping = 'Right'
default_formats = False
sympy_name = 'Pow'
mpmath_name = 'power'
nargs = 2
messages = {
'infy': "Infinite expression `1` encountered.",
'indet': 'Indeterminate expression `1` encountered.',
}
defaults = {
2: '1',
}
formats = {
Expression('Power', Expression('Pattern', Symbol('x'),
Expression('Blank')), Rational(1, 2)): 'HoldForm[Sqrt[x]]',
(('InputForm', 'OutputForm'), 'x_ ^ y_'): (
'Infix[{HoldForm[x], HoldForm[y]}, "^", 590, Right]'),
('', 'x_ ^ y_'): (
'PrecedenceForm[Superscript[OuterPrecedenceForm[HoldForm[x], 590],'
' HoldForm[y]], 590]'),
('', 'x_ ^ y_?Negative'): (
'HoldForm[Divide[1, #]]&[If[y==-1, HoldForm[x], HoldForm[x]^-y]]'),
}
rules = {
'Power[]': '1',
'Power[x_]': 'x',
}
def apply_check(self, x, y, evaluation):
'Power[x_, y_]'
# Power uses _MPMathFunction but does some error checking first
if isinstance(x, Number) and x.is_zero:
if isinstance(y, Number):
y_err = y
else:
y_err = Expression('N', y).evaluate(evaluation)
if isinstance(y_err, Number):
py_y = y_err.round_to_float(permit_complex=True).real
if py_y > 0:
return x
elif py_y == 0.0:
evaluation.message('Power', 'indet', Expression('Power', x, y_err))
return Symbol('Indeterminate')
elif py_y < 0:
evaluation.message('Power', 'infy', Expression('Power', x, y_err))
return Symbol('ComplexInfinity')
result = self.apply(Expression('Sequence', x, y), evaluation)
if result is None or result != Symbol('Null'):
return result
class Sqrt(SympyFunction):
"""
<dl>
<dt>'Sqrt[$expr$]'
<dd>returns the square root of $expr$.
</dl>
>> Sqrt[4]
= 2
>> Sqrt[5]
= Sqrt[5]
>> Sqrt[5] // N
= 2.23607
>> Sqrt[a]^2
= a
Complex numbers:
>> Sqrt[-4]
= 2 I
>> I == Sqrt[-1]
= True
>> Plot[Sqrt[a^2], {a, -2, 2}]
= -Graphics-
#> N[Sqrt[2], 50]
= 1.4142135623730950488016887242096980785696718753769
"""
attributes = ('Listable', 'NumericFunction')
rules = {
'Sqrt[x_]': 'x ^ (1/2)',
'MakeBoxes[Sqrt[x_], f:StandardForm|TraditionalForm]': (
'SqrtBox[MakeBoxes[x, f]]'),
}
class Infinity(SympyConstant):
"""
<dl>
<dt>'Infinity'
<dd>represents an infinite real quantity.
</dl>
>> 1 / Infinity
= 0
>> Infinity + 100
= Infinity
Use 'Infinity' in sum and limit calculations:
>> Sum[1/x^2, {x, 1, Infinity}]
= Pi ^ 2 / 6
#> FullForm[Infinity]
= DirectedInfinity[1]
#> (2 + 3.5*I) / Infinity
= 0. + 0. I
#> Infinity + Infinity
= Infinity
#> Infinity / Infinity
: Indeterminate expression 0 Infinity encountered.
= Indeterminate
"""
sympy_name = 'oo'
rules = {
'Infinity': 'DirectedInfinity[1]',
'MakeBoxes[Infinity, f:StandardForm|TraditionalForm]': (
'"\\[Infinity]"'),
}
class ComplexInfinity(SympyConstant):
"""
<dl>
<dt>'ComplexInfinity'
<dd>represents an infinite complex quantity of undetermined direction.
</dl>
>> 1 / ComplexInfinity
= 0
>> ComplexInfinity + ComplexInfinity
= ComplexInfinity
>> ComplexInfinity * Infinity
= ComplexInfinity
>> FullForm[ComplexInfinity]
= DirectedInfinity[]
"""
sympy_name = 'ComplexInfinity'
rules = {
'ComplexInfinity': 'DirectedInfinity[]',
}
class DirectedInfinity(SympyFunction):
"""
<dl>
<dt>'DirectedInfinity[$z$]'</dt>
<dd>represents an infinite multiple of the complex number $z$.
<dt>'DirectedInfinity[]'</dt>
<dd>is the same as 'ComplexInfinity'.</dd>
</dl>
>> DirectedInfinity[1]
= Infinity
>> DirectedInfinity[]
= ComplexInfinity
>> DirectedInfinity[1 + I]
= (1 / 2 + I / 2) Sqrt[2] Infinity
>> 1 / DirectedInfinity[1 + I]
= 0
>> DirectedInfinity[1] + DirectedInfinity[-1]
: Indeterminate expression -Infinity + Infinity encountered.
= Indeterminate
#> DirectedInfinity[1+I]+DirectedInfinity[2+I]
= (2 / 5 + I / 5) Sqrt[5] Infinity + (1 / 2 + I / 2) Sqrt[2] Infinity
#> DirectedInfinity[Sqrt[3]]
= Infinity
"""
rules = {
'DirectedInfinity[args___] ^ -1': '0',
'0 * DirectedInfinity[args___]': 'Message[Infinity::indet, Unevaluated[0 DirectedInfinity[args]]]; Indeterminate',
'DirectedInfinity[a_?NumericQ] /; N[Abs[a]] != 1': 'DirectedInfinity[a / Abs[a]]',
'DirectedInfinity[a_] * DirectedInfinity[b_]': 'DirectedInfinity[a*b]',
'DirectedInfinity[] * DirectedInfinity[args___]': 'DirectedInfinity[]',
'DirectedInfinity[0]': 'DirectedInfinity[]',
'z_?NumberQ * DirectedInfinity[]': 'DirectedInfinity[]',
'z_?NumberQ * DirectedInfinity[a_]': 'DirectedInfinity[z * a]',
'DirectedInfinity[a_] + DirectedInfinity[b_] /; b == -a': (
'Message[Infinity::indet,'
' Unevaluated[DirectedInfinity[a] + DirectedInfinity[b]]];'
'Indeterminate'),
'DirectedInfinity[args___] + _?NumberQ': 'DirectedInfinity[args]',
}
formats = {
'DirectedInfinity[1]': 'HoldForm[Infinity]',
'DirectedInfinity[-1]': 'HoldForm[-Infinity]',
'DirectedInfinity[]': 'HoldForm[ComplexInfinity]',
'DirectedInfinity[z_?NumericQ]': 'HoldForm[z Infinity]',
}
def to_sympy(self, expr, **kwargs):
if len(expr.leaves) == 1:
dir = expr.leaves[0].get_int_value()
if dir == 1:
return sympy.oo
elif dir == -1:
return -sympy.oo
class Re(SympyFunction):
"""
<dl>
<dt>'Re[$z$]'
<dd>returns the real component of the complex number $z$.
</dl>
>> Re[3+4I]
= 3
>> Plot[{Cos[a], Re[E^(I a)]}, {a, 0, 2 Pi}]
= -Graphics-
#> Im[0.5 + 2.3 I]
= 2.3
#> % // Precision
= MachinePrecision
"""
attributes = ('Listable', 'NumericFunction')
def apply_complex(self, number, evaluation):
'Re[number_Complex]'
return number.real
def apply_number(self, number, evaluation):
'Re[number_?NumberQ]'
return number
class Im(SympyFunction):
"""
<dl>
<dt>'Im[$z$]'
<dd>returns the imaginary component of the complex number $z$.
</dl>
>> Im[3+4I]
= 4
>> Plot[{Sin[a], Im[E^(I a)]}, {a, 0, 2 Pi}]
= -Graphics-
#> Re[0.5 + 2.3 I]
= 0.5
#> % // Precision
= MachinePrecision
"""
attributes = ('Listable', 'NumericFunction')
def apply_complex(self, number, evaluation):
'Im[number_Complex]'
return number.imag
def apply_number(self, number, evaluation):
'Im[number_?NumberQ]'
return Integer(0)
class Conjugate(_MPMathFunction):
"""
<dl>
<dt>'Conjugate[$z$]'
<dd>returns the complex conjugate of the complex number $z$.
</dl>
>> Conjugate[3 + 4 I]
= 3 - 4 I
>> Conjugate[3]
= 3
>> Conjugate[a + b * I]
= Conjugate[a] - I Conjugate[b]
>> Conjugate[{{1, 2 + I 4, a + I b}, {I}}]
= {{1, 2 - 4 I, Conjugate[a] - I Conjugate[b]}, {-I}}
## Issue #272
#> {Conjugate[Pi], Conjugate[E]}
= {Pi, E}
>> Conjugate[1.5 + 2.5 I]
= 1.5 - 2.5 I
"""
mpmath_name = 'conj'
class Abs(_MPMathFunction):
"""
<dl>
<dt>'Abs[$x$]'
<dd>returns the absolute value of $x$.
</dl>
>> Abs[-3]
= 3
'Abs' returns the magnitude of complex numbers:
>> Abs[3 + I]
= Sqrt[10]
>> Abs[3.0 + I]
= 3.16228
>> Plot[Abs[x], {x, -4, 4}]
= -Graphics-
#> Abs[I]
= 1
#> Abs[a - b]
= Abs[a - b]
#> Abs[Sqrt[3]]
= Sqrt[3]
"""
sympy_name = 'Abs'
mpmath_name = 'fabs' # mpmath actually uses python abs(x) / x.__abs__()
class I(Predefined):
"""
<dl>
<dt>'I'
<dd>represents the imaginary number 'Sqrt[-1]'.
</dl>
>> I^2
= -1
>> (3+I)*(3-I)
= 10
"""
def evaluate(self, evaluation):
return Complex(Integer(0), Integer(1))
class Indeterminate(SympyConstant):
"""
<dl>
<dt>'Indeterminate'
<dd>represents an indeterminate result.
</dl>
>> 0^0
: Indeterminate expression 0 ^ 0 encountered.
= Indeterminate
>> Tan[Indeterminate]
= Indeterminate
"""
sympy_name = 'nan'
class NumberQ(Test):
"""
<dl>
<dt>'NumberQ[$expr$]'
<dd>returns 'True' if $expr$ is an explicit number, and 'False' otherwise.
</dl>
>> NumberQ[3+I]
= True
>> NumberQ[5!]
= True
>> NumberQ[Pi]
= False
"""
def test(self, expr):
return isinstance(expr, Number)
class RealNumberQ(Test):
"""
<dl>
<dt>'RealNumberQ[$expr$]'
<dd>returns 'True' if $expr$ is an explicit number with no imaginary component.
</dl>
>> RealNumberQ[10]
= True
>> RealNumberQ[4.0]
= True
>> RealNumberQ[1+I]
= False
>> RealNumberQ[0 * I]
= True
>> RealNumberQ[0.0 * I]
= False
"""
def test(self, expr):
return isinstance(expr, (Integer, Rational, Real))
class MachineNumberQ(Test):
'''
<dl>
<dt>'MachineNumberQ[$expr$]'
<dd>returns 'True' if $expr$ is a machine-precision real or complex number.
</dl>
>> MachineNumberQ[3.14159]
= True
>> MachineNumberQ[3.14159265358979324]
= False
>> MachineNumberQ[1.5 + 2.3 I]
= True
>> MachineNumberQ[2.71828182845904524 + 3.14159265358979324 I]
= False
#> MachineNumberQ[1.5 + 3.14159265358979324 I]
= True
#> MachineNumberQ[1.5 + 5 I]
= True
'''
def test(self, expr):
return expr.is_machine_precision()
class ExactNumberQ(Test):
"""
<dl>
<dt>'ExactNumberQ[$expr$]'
<dd>returns 'True' if $expr$ is an exact number, and 'False' otherwise.
</dl>
>> ExactNumberQ[10]
= True
>> ExactNumberQ[4.0]
= False
>> ExactNumberQ[n]
= False
'ExactNumberQ' can be applied to complex numbers:
>> ExactNumberQ[1 + I]
= True
>> ExactNumberQ[1 + 1. I]
= False
"""
def test(self, expr):
return isinstance(expr, Number) and not expr.is_inexact()
class InexactNumberQ(Test):
"""
<dl>
<dt>'InexactNumberQ[$expr$]'
<dd>returns 'True' if $expr$ is not an exact number, and 'False' otherwise.
</dl>
>> InexactNumberQ[a]
= False
>> InexactNumberQ[3.0]
= True
>> InexactNumberQ[2/3]
= False
'InexactNumberQ' can be applied to complex numbers:
>> InexactNumberQ[4.0+I]
= True
"""
def test(self, expr):
return isinstance(expr, Number) and expr.is_inexact()
class IntegerQ(Test):
"""
<dl>
<dt>'IntegerQ[$expr$]'
<dd>returns 'True' if $expr$ is an integer, and 'False' otherwise.
</dl>
>> IntegerQ[3]
= True
>> IntegerQ[Pi]
= False
"""
def test(self, expr):
return isinstance(expr, Integer)
class Integer_(Builtin):
"""
<dl>
<dt>'Integer'
<dd>is the head of integers.
</dl>
>> Head[5]
= Integer
## Test large Integer comparison bug
#> {a, b} = {2^10000, 2^10000 + 1}; {a == b, a < b, a <= b}
= {False, True, True}
"""
name = 'Integer'
class Real_(Builtin):
"""
<dl>
<dt>'Real'
<dd>is the head of real (inexact) numbers.
</dl>
>> x = 3. ^ -20;
>> InputForm[x]
= 2.8679719907924413*^-10
>> Head[x]
= Real
## Formatting tests
#> 1. * 10^6
= 1.*^6
#> 1. * 10^5
= 100000.
#> -1. * 10^6
= -1.*^6
#> -1. * 10^5
= -100000.
#> 1. * 10^-6
= 1.*^-6
#> 1. * 10^-5
= 0.00001
#> -1. * 10^-6
= -1.*^-6
#> -1. * 10^-5
= -0.00001
## Mathematica treats zero strangely
#> 0.0000000000000
= 0.
#> 0.0000000000000000000000000000
= 0.*^-28
## Parse *^ Notation
#> 1.5*^24
= 1.5*^24
#> 1.5*^+24
= 1.5*^24
#> 1.5*^-24
= 1.5*^-24
## Don't accept *^ with spaces
#> 1.5 *^10
: "1.5 *" cannot be followed by "^10" (line 1 of "<test>").
#> 1.5*^ 10
: "1.5*" cannot be followed by "^ 10" (line 1 of "<test>").
## Issue654
#> 1^^2
: Requested base 1 in 1^^2 should be between 2 and 36.
: Expression cannot begin with "1^^2" (line 1 of "<test>").
#> 2^^0101
= 5
#> 2^^01210
: Digit at position 3 in 01210 is too large to be used in base 2.
: Expression cannot begin with "2^^01210" (line 1 of "<test>").
#> 16^^5g
: Digit at position 2 in 5g is too large to be used in base 16.
: Expression cannot begin with "16^^5g" (line 1 of "<test>").
#> 36^^0123456789abcDEFxyzXYZ
= 14142263610074677021975869033659
#> 37^^3
: Requested base 37 in 37^^3 should be between 2 and 36.
: Expression cannot begin with "37^^3" (line 1 of "<test>").
"""
name = 'Real'
class Rational_(Builtin):
"""
<dl>
<dt>'Rational'</dt>
<dd>is the head of rational numbers.</dd>
<dt>'Rational[$a$, $b$]'</dt>
<dd>constructs the rational number $a$ / $b$.</dd>
</dl>
>> Head[1/2]
= Rational
>> Rational[1, 2]
= 1 / 2
#> -2/3
= -2 / 3
"""
name = 'Rational'
def apply(self, n, m, evaluation):
'Rational[n_Integer, m_Integer]'
if m.to_sympy() == 1:
return Integer(n.to_sympy())
else:
return Rational(n.to_sympy(), m.to_sympy())
class Complex_(Builtin):
"""
<dl>
<dt>'Complex'
<dd>is the head of complex numbers.
<dt>'Complex[$a$, $b$]'
<dd>constructs the complex number '$a$ + I $b$'.
</dl>
>> Head[2 + 3*I]
= Complex
>> Complex[1, 2/3]
= 1 + 2 I / 3
>> Abs[Complex[3, 4]]
= 5
#> OutputForm[Complex[2.0 ^ 40, 3]]
= 1.09951*^12 + 3. I
#> InputForm[Complex[2.0 ^ 40, 3]]
= 1.099511627776*^12 + 3.*I
#> -2 / 3 - I
= -2 / 3 - I
#> Complex[10, 0]
= 10
#> 0. + I
= 0. + 1. I
#> 1 + 0 I
= 1
#> Head[%]
= Integer
#> Complex[0.0, 0.0]
= 0. + 0. I
#> 0. I
= 0. + 0. I
#> 0. + 0. I
= 0. + 0. I
#> 1. + 0. I
= 1. + 0. I
#> 0. + 1. I
= 0. + 1. I
## Check Nesting Complex
#> Complex[1, Complex[0, 1]]
= 0
#> Complex[1, Complex[1, 0]]
= 1 + I
#> Complex[1, Complex[1, 1]]
= I
"""
name = 'Complex'
def apply(self, r, i, evaluation):
'Complex[r_?NumberQ, i_?NumberQ]'
if isinstance(r, Complex) or isinstance(i, Complex):
sym_form = r.to_sympy() + sympy.I * i.to_sympy()
r, i = sym_form.simplify().as_real_imag()
r, i = from_sympy(r), from_sympy(i)
return Complex(r, i)
class Factorial(PostfixOperator, _MPMathFunction):
"""
<dl>
<dt>'Factorial[$n$]'
<dt>'$n$!'
<dd>computes the factorial of $n$.
</dl>
>> 20!
= 2432902008176640000
'Factorial' handles numeric (real and complex) values using the gamma function:
>> 10.5!
= 1.18994*^7
>> (-3.0+1.5*I)!
= 0.0427943 - 0.00461565 I
However, the value at poles is 'ComplexInfinity':
>> (-1.)!
= ComplexInfinity
'Factorial' has the same operator ('!') as 'Not', but with higher precedence:
>> !a! //FullForm
= Not[Factorial[a]]
#> 0!
= 1
"""
operator = '!'
precedence = 610
mpmath_name = 'factorial'
class Gamma(_MPMathMultiFunction):
"""
<dl>
<dt>'Gamma[$z$]'
<dd>is the gamma function on the complex number $z$.
<dt>'Gamma[$z$, $x$]'
<dd>is the upper incomplete gamma function.
<dt>'Gamma[$z$, $x0$, $x1$]'
<dd>is equivalent to 'Gamma[$z$, $x0$] - Gamma[$z$, $x1$]'.
</dl>
'Gamma[$z$]' is equivalent to '($z$ - 1)!':
>> Simplify[Gamma[z] - (z - 1)!]
= 0
Exact arguments:
>> Gamma[8]
= 5040
>> Gamma[1/2]
= Sqrt[Pi]
>> Gamma[1, x]
= E ^ (-x)
>> Gamma[0, x]
= ExpIntegralE[1, x]
Numeric arguments:
>> Gamma[123.78]
= 4.21078*^204
>> Gamma[1. + I]
= 0.498016 - 0.15495 I
Both 'Gamma' and 'Factorial' functions are continuous:
>> Plot[{Gamma[x], x!}, {x, 0, 4}]
= -Graphics-
## Issue 203
#> N[Gamma[24/10], 100]
= 1.242169344504305404913070252268300492431517240992022966055507541481863694148882652446155342679460339
#> N[N[Gamma[24/10],100]/N[Gamma[14/10],100],100]
= 1.400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
#> % // Precision
= 100.
#> Gamma[1.*^20]
: Overflow occurred in computation.
= Overflow[]
## Needs mpmath support for lowergamma
#> Gamma[1., 2.]
= Gamma[1., 2.]
"""
mpmath_names = {
1: 'gamma',
}
sympy_names = {
1: 'gamma',
2: 'uppergamma',
}
rules = {
'Gamma[z_, x0_, x1_]': 'Gamma[z, x0] - Gamma[z, x1]',
'Gamma[1 + z_]': 'z!',
}
def get_sympy_names(self):
return ['gamma', 'uppergamma', 'lowergamma']
def from_sympy(self, sympy_name, leaves):
if sympy_name == 'lowergamma':
# lowergamma(z, x) -> Gamma[z, 0, x]
z, x = leaves
return Expression(
self.get_name(), z, Integer(0), x)
else:
return Expression(self.get_name(), *leaves)
class Pochhammer(SympyFunction):
"""
<dl>
<dt>'Pochhammer[$a$, $n$]'
<dd>is the Pochhammer symbol (a)_n.
</dl>
>> Pochhammer[4, 8]
= 6652800
"""
sympy_name = 'RisingFactorial'
rules = {
'Pochhammer[a_, n_]': 'Gamma[a + n] / Gamma[a]',
}
class HarmonicNumber(_MPMathFunction):
"""
<dl>
<dt>'HarmonicNumber[$n$]'
<dd>returns the $n$th harmonic number.
</dl>
>> Table[HarmonicNumber[n], {n, 8}]
= {1, 3 / 2, 11 / 6, 25 / 12, 137 / 60, 49 / 20, 363 / 140, 761 / 280}
>> HarmonicNumber[3.8]
= 2.03806
#> HarmonicNumber[-1.5]
= 0.613706
"""
rules = {
'HarmonicNumber[-1]': 'ComplexInfinity',
}
sympy_name = 'harmonic'
mpmath_name = 'harmonic'
class Sum(_IterationFunction, SympyFunction):
"""
<dl>
<dt>'Sum[$expr$, {$i$, $imin$, $imax$}]'
<dd>evaluates the discrete sum of $expr$ with $i$ ranging from $imin$ to $imax$.
<dt>'Sum[$expr$, {$i$, $imax$}]'
<dd>same as 'Sum[$expr$, {$i$, 1, $imax$}]'.
<dt>'Sum[$expr$, {$i$, $imin$, $imax$, $di$}]'
<dd>$i$ ranges from $imin$ to $imax$ in steps of $di$.
<dt>'Sum[$expr$, {$i$, $imin$, $imax$}, {$j$, $jmin$, $jmax$}, ...]'
<dd>evaluates $expr$ as a multiple sum, with {$i$, ...}, {$j$, ...}, ... being in outermost-to-innermost order.
</dl>
>> Sum[k, {k, 1, 10}]
= 55
Double sum:
>> Sum[i * j, {i, 1, 10}, {j, 1, 10}]
= 3025
Symbolic sums are evaluated:
>> Sum[k, {k, 1, n}]
= n (1 + n) / 2
>> Sum[k, {k, n, 2 n}]
= 3 n (1 + n) / 2
>> Sum[k, {k, I, I + 1}]
= 1 + 2 I
>> Sum[1 / k ^ 2, {k, 1, n}]
= HarmonicNumber[n, 2]
Verify algebraic identities:
>> Sum[x ^ 2, {x, 1, y}] - y * (y + 1) * (2 * y + 1) / 6
= 0
>> (-1 + a^n) Sum[a^(k n), {k, 0, m-1}] // Simplify
= Piecewise[{{m (-1 + a ^ n), a ^ n == 1}}, -1 + (a ^ n) ^ m]
Infinite sums:
>> Sum[1 / 2 ^ i, {i, 1, Infinity}]
= 1
>> Sum[1 / k ^ 2, {k, 1, Infinity}]
= Pi ^ 2 / 6
#> a=Sum[x^k*Sum[y^l,{l,0,4}],{k,0,4}]]
: "a=Sum[x^k*Sum[y^l,{l,0,4}],{k,0,4}]" cannot be followed by "]" (line 1 of "<test>").
## Issue431
#> Sum[2^(-i), {i, 1, \[Infinity]}]
= 1
## Issue302
#> Sum[i / Log[i], {i, 1, Infinity}]
= Sum[i / Log[i], {i, 1, Infinity}]
#> Sum[Cos[Pi i], {i, 1, Infinity}]
= Sum[Cos[Pi i], {i, 1, Infinity}]
"""
# Do not throw warning message for symbolic iteration bounds
throw_iterb = False
sympy_name = 'Sum'
rules = _IterationFunction.rules.copy()
rules.update({
'MakeBoxes[Sum[f_, {i_, a_, b_, 1}],'
' form:StandardForm|TraditionalForm]': (
r'RowBox[{SubsuperscriptBox["\[Sum]",'
r' RowBox[{MakeBoxes[i, form], "=", MakeBoxes[a, form]}],'
r' MakeBoxes[b, form]], MakeBoxes[f, form]}]'),
})
def get_result(self, items):
return Expression('Plus', *items)
def to_sympy(self, expr, **kwargs):
if expr.has_form('Sum', 2) and expr.leaves[1].has_form('List', 3):
index = expr.leaves[1]
arg = expr.leaves[0].to_sympy()
bounds = (index.leaves[0].to_sympy(), index.leaves[1].to_sympy(), index.leaves[2].to_sympy())
if arg is not None and None not in bounds:
return sympy.summation(arg, bounds)
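# Hedged, illustrative sketch (added commentary, not original Mathics code):
# to_sympy above maps Sum[f, {i, a, b}] onto sympy.summation(f, (i, a, b)).
# The helper below shows the same call in isolation (sympy is already
# imported by this module); nothing in this module invokes it.
def _example_sum_translation():
    k, n = sympy.symbols('k n')
    # Sum[k, {k, 1, n}] -> n*(n + 1)/2, printed by Mathics as n (1 + n) / 2
    return sympy.summation(k, (k, 1, n))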
class Product(_IterationFunction, SympyFunction):
"""
<dl>
<dt>'Product[$expr$, {$i$, $imin$, $imax$}]'
<dd>evaluates the discrete product of $expr$ with $i$ ranging from $imin$ to $imax$.
<dt>'Product[$expr$, {$i$, $imax$}]'
<dd>same as 'Product[$expr$, {$i$, 1, $imax$}]'.
<dt>'Product[$expr$, {$i$, $imin$, $imax$, $di$}]'
<dd>$i$ ranges from $imin$ to $imax$ in steps of $di$.
<dt>'Product[$expr$, {$i$, $imin$, $imax$}, {$j$, $jmin$, $jmax$}, ...]'
<dd>evaluates $expr$ as a multiple product, with {$i$, ...}, {$j$, ...}, ... being in outermost-to-innermost order.
</dl>
>> Product[k, {k, 1, 10}]
= 3628800
>> 10!
= 3628800
>> Product[x^k, {k, 2, 20, 2}]
= x ^ 110
>> Product[2 ^ i, {i, 1, n}]
= 2 ^ (n / 2 + n ^ 2 / 2)
Symbolic products involving the factorial are evaluated:
>> Product[k, {k, 3, n}]
= n! / 2
Evaluate the $n$th primorial:
>> primorial[0] = 1;
>> primorial[n_Integer] := Product[Prime[k], {k, 1, n}];
>> primorial[12]
= 7420738134810
## Used to be a bug in sympy, but now it is solved exactly!
## Again a bug in sympy - regressions between 0.7.3 and 0.7.6 (and 0.7.7?)
## #> Product[1 + 1 / i ^ 2, {i, Infinity}]
## = 1 / ((-I)! I!)
"""
throw_iterb = False
sympy_name = 'Product'
rules = _IterationFunction.rules.copy()
rules.update({
'MakeBoxes[Product[f_, {i_, a_, b_, 1}],'
' form:StandardForm|TraditionalForm]': (
r'RowBox[{SubsuperscriptBox["\[Product]",'
r' RowBox[{MakeBoxes[i, form], "=", MakeBoxes[a, form]}],'
r' MakeBoxes[b, form]], MakeBoxes[f, form]}]'),
})
def get_result(self, items):
return Expression('Times', *items)
def to_sympy(self, expr, **kwargs):
if expr.has_form('Product', 2) and expr.leaves[1].has_form('List', 3):
index = expr.leaves[1]
try:
return sympy.product(expr.leaves[0].to_sympy(), (
index.leaves[0].to_sympy(), index.leaves[1].to_sympy(),
index.leaves[2].to_sympy()))
except ZeroDivisionError:
pass
class Piecewise(SympyFunction):
"""
<dl>
<dt>'Piecewise[{{$expr1$, $cond1$}, ...}]'
<dd>represents a piecewise function.
<dt>'Piecewise[{{$expr1$, $cond1$}, ...}, $expr$]'
<dd>represents a piecewise function with default $expr$.
</dl>
The Heaviside function:
>> Piecewise[{{0, x <= 0}}, 1]
= Piecewise[{{0, x <= 0}}, 1]
## D[%, x]
## Piecewise({{0, Or[x < 0, x > 0]}}, Indeterminate).
>> Integrate[Piecewise[{{1, x <= 0}, {-1, x > 0}}], x]
= Piecewise[{{x, x <= 0}, {-x, x > 0}}]
>> Integrate[Piecewise[{{1, x <= 0}, {-1, x > 0}}], {x, -1, 2}]
= -1
'Piecewise' defaults to 0 if no case matches:
>> Piecewise[{{1, False}}]
= 0
>> Plot[Piecewise[{{Log[x], x > 0}, {x*-0.5, x < 0}}], {x, -1, 1}]
= -Graphics-
>> Piecewise[{{0 ^ 0, False}}, -1]
= -1
"""
sympy_name = 'Piecewise'
attributes = (
'HoldAll',
)
def apply(self, items, evaluation):
'Piecewise[items__]'
result = self.to_sympy(Expression('Piecewise', *items.get_sequence()))
if result is None:
return
if not isinstance(result, sympy.Piecewise):
return from_sympy(result)
def to_sympy(self, expr, **kwargs):
leaves = expr.leaves
if len(leaves) not in (1, 2):
return
sympy_cases = []
for case in leaves[0].leaves:
if case.get_head_name() != 'System`List':
return
if len(case.leaves) != 2:
return
then, cond = case.leaves
sympy_cond = None
if isinstance(cond, Symbol):
cond_name = cond.get_name()
if cond_name == 'System`True':
sympy_cond = True
elif cond_name == 'System`False':
sympy_cond = False
if sympy_cond is None:
sympy_cond = cond.to_sympy(**kwargs)
if not (sympy_cond.is_Relational or sympy_cond.is_Boolean):
return
sympy_cases.append((then.to_sympy(**kwargs), sympy_cond))
if len(leaves) == 2: # default case
sympy_cases.append((leaves[1].to_sympy(**kwargs), True))
else:
sympy_cases.append((Integer(0), True))
return sympy.Piecewise(*sympy_cases)
def from_sympy(self, sympy_name, args):
# Hack to get around weird sympy.Piecewise 'otherwise' behaviour
if str(args[-1].leaves[1]).startswith('System`_True__Dummy_'):
args[-1].leaves[1] = Symbol('True')
return Expression(self.get_name(), args)
class Boole(Builtin):
"""
<dl>
<dt>'Boole[$expr$]'
<dd>returns 1 if $expr$ is 'True' and 0 if $expr$ is 'False'.
</dl>
>> Boole[2 == 2]
= 1
>> Boole[7 < 5]
= 0
>> Boole[a == 7]
= Boole[a == 7]
"""
attributes = ('Listable',)
def apply(self, expr, evaluation):
'Boole[expr_]'
if isinstance(expr, Symbol):
name = expr.get_name()
if name == 'System`True':
return Integer(1)
elif name == 'System`False':
return Integer(0)
return None
|
turbokongen/home-assistant
|
refs/heads/dev
|
homeassistant/components/melissa/climate.py
|
15
|
"""Support for Melissa Climate A/C."""
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS
from . import DATA_MELISSA
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE
OP_MODES = [
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_OFF,
]
FAN_MODES = [FAN_AUTO, FAN_HIGH, FAN_MEDIUM, FAN_LOW]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Iterate through and add all Melissa devices."""
api = hass.data[DATA_MELISSA]
devices = (await api.async_fetch_devices()).values()
all_devices = []
for device in devices:
if device["type"] == "melissa":
all_devices.append(MelissaClimate(api, device["serial_number"], device))
async_add_entities(all_devices)
class MelissaClimate(ClimateEntity):
"""Representation of a Melissa Climate device."""
def __init__(self, api, serial_number, init_data):
"""Initialize the climate device."""
self._name = init_data["name"]
self._api = api
self._serial_number = serial_number
self._data = init_data["controller_log"]
self._state = None
self._cur_settings = None
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._name
@property
def fan_mode(self):
"""Return the current fan mode."""
if self._cur_settings is not None:
return self.melissa_fan_to_hass(self._cur_settings[self._api.FAN])
@property
def current_temperature(self):
"""Return the current temperature."""
if self._data:
return self._data[self._api.TEMP]
@property
def current_humidity(self):
"""Return the current humidity value."""
if self._data:
return self._data[self._api.HUMIDITY]
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return PRECISION_WHOLE
@property
def hvac_mode(self):
"""Return the current operation mode."""
if self._cur_settings is None:
return None
is_on = self._cur_settings[self._api.STATE] in (
self._api.STATE_ON,
self._api.STATE_IDLE,
)
if not is_on:
return HVAC_MODE_OFF
return self.melissa_op_to_hass(self._cur_settings[self._api.MODE])
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return OP_MODES
@property
def fan_modes(self):
"""List of available fan modes."""
return FAN_MODES
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._cur_settings is None:
return None
return self._cur_settings[self._api.TEMP]
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def min_temp(self):
"""Return the minimum supported temperature for the thermostat."""
return 16
@property
def max_temp(self):
"""Return the maximum supported temperature for the thermostat."""
return 30
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temp = kwargs.get(ATTR_TEMPERATURE)
await self.async_send({self._api.TEMP: temp})
async def async_set_fan_mode(self, fan_mode):
"""Set fan mode."""
melissa_fan_mode = self.hass_fan_to_melissa(fan_mode)
await self.async_send({self._api.FAN: melissa_fan_mode})
async def async_set_hvac_mode(self, hvac_mode):
"""Set operation mode."""
if hvac_mode == HVAC_MODE_OFF:
await self.async_send({self._api.STATE: self._api.STATE_OFF})
return
mode = self.hass_mode_to_melissa(hvac_mode)
await self.async_send(
{self._api.MODE: mode, self._api.STATE: self._api.STATE_ON}
)
async def async_send(self, value):
"""Send action to service."""
try:
old_value = self._cur_settings.copy()
self._cur_settings.update(value)
except AttributeError:
old_value = None
if not await self._api.async_send(
self._serial_number, "melissa", self._cur_settings
):
self._cur_settings = old_value
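# Note (added commentary): async_send updates self._cur_settings
# optimistically and restores old_value when the API rejects the request.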
async def async_update(self):
"""Get latest data from Melissa."""
try:
self._data = (await self._api.async_status(cached=True))[
self._serial_number
]
self._cur_settings = (
await self._api.async_cur_settings(self._serial_number)
)["controller"]["_relation"]["command_log"]
except KeyError:
_LOGGER.warning("Unable to update entity %s", self.entity_id)
def melissa_op_to_hass(self, mode):
"""Translate Melissa modes to hass states."""
if mode == self._api.MODE_HEAT:
return HVAC_MODE_HEAT
if mode == self._api.MODE_COOL:
return HVAC_MODE_COOL
if mode == self._api.MODE_DRY:
return HVAC_MODE_DRY
if mode == self._api.MODE_FAN:
return HVAC_MODE_FAN_ONLY
_LOGGER.warning("Operation mode %s could not be mapped to hass", mode)
return None
def melissa_fan_to_hass(self, fan):
"""Translate Melissa fan modes to hass modes."""
if fan == self._api.FAN_AUTO:
return FAN_AUTO
if fan == self._api.FAN_LOW:
return FAN_LOW
if fan == self._api.FAN_MEDIUM:
return FAN_MEDIUM
if fan == self._api.FAN_HIGH:
return FAN_HIGH
_LOGGER.warning("Fan mode %s could not be mapped to hass", fan)
return None
def hass_mode_to_melissa(self, mode):
"""Translate hass states to melissa modes."""
if mode == HVAC_MODE_HEAT:
return self._api.MODE_HEAT
if mode == HVAC_MODE_COOL:
return self._api.MODE_COOL
if mode == HVAC_MODE_DRY:
return self._api.MODE_DRY
if mode == HVAC_MODE_FAN_ONLY:
return self._api.MODE_FAN
_LOGGER.warning("Melissa have no setting for %s mode", mode)
def hass_fan_to_melissa(self, fan):
"""Translate hass fan modes to melissa modes."""
if fan == FAN_AUTO:
return self._api.FAN_AUTO
if fan == FAN_LOW:
return self._api.FAN_LOW
if fan == FAN_MEDIUM:
return self._api.FAN_MEDIUM
if fan == FAN_HIGH:
return self._api.FAN_HIGH
_LOGGER.warning("Melissa have no setting for %s fan mode", fan)
|
byllyfish/faucet
|
refs/heads/master
|
faucet/__init__.py
|
12133432
| |
ftomassetti/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/ro/__init__.py
|
12133432
| |
ed-/solum
|
refs/heads/master
|
solum/tests/deployer/__init__.py
|
12133432
| |
mahim97/zulip
|
refs/heads/master
|
zerver/management/__init__.py
|
12133432
| |
JimCircadian/ansible
|
refs/heads/devel
|
lib/ansible/modules/clustering/__init__.py
|
12133432
| |
vv1133/home_web
|
refs/heads/master
|
django/conf/locale/de_CH/__init__.py
|
12133432
| |
tudyzhb/yichui
|
refs/heads/master
|
django/contrib/localflavor/ec/__init__.py
|
12133432
| |
slobberchops/rop
|
refs/heads/master
|
opc/utils/__init__.py
|
12133432
| |
gizmag/django-fsm-log
|
refs/heads/master
|
django_fsm_log/decorators.py
|
2
|
from functools import wraps, partial
from .helpers import FSMLogDescriptor
def fsm_log_by(func):
@wraps(func)
def wrapped(instance, *args, **kwargs):
try:
by = kwargs['by']
except KeyError:
return func(instance, *args, **kwargs)
with FSMLogDescriptor(instance, 'by', by):
return func(instance, *args, **kwargs)
return wrapped
def fsm_log_description(func=None, allow_inline=False):
if func is None:
return partial(fsm_log_description, allow_inline=allow_inline)
@wraps(func)
def wrapped(instance, *args, **kwargs):
with FSMLogDescriptor(instance, 'description') as descriptor:
try:
description = kwargs['description']
except KeyError:
if allow_inline:
kwargs['description'] = descriptor
return func(instance, *args, **kwargs)
descriptor.set(description)
return func(instance, *args, **kwargs)
return wrapped
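# Hedged usage sketch (illustrative addition; Article, its state field and
# the request object are hypothetical). Both decorators are meant to wrap a
# django-fsm @transition method:
#
#   class Article(models.Model):
#       state = FSMField(default='draft')
#
#       @fsm_log_by
#       @fsm_log_description(allow_inline=True)
#       @transition(field=state, source='draft', target='published')
#       def publish(self, by=None, description=None):
#           description.set('published from the admin')
#
#   article.publish(by=request.user)  # 'by' is recorded on the log entry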
|
nagyistoce/edx-analytics-data-api-client
|
refs/heads/master
|
analyticsclient/exceptions.py
|
3
|
class ClientError(Exception):
""" Common base class for all client errors. """
pass
class NotFoundError(ClientError):
""" URL was not found. """
pass
class InvalidRequestError(ClientError):
""" The API request was invalid. """
pass
class TimeoutError(ClientError):
""" The API server did not respond before the timeout expired. """
pass
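# Hedged usage sketch (illustrative addition, not part of the original
# module). The `client` object and its methods are hypothetical; the point is
# that ClientError is the common base for coarse-grained handling.
def _example_error_handling(client, course_id):
    try:
        return client.courses(course_id).enrollment()
    except NotFoundError:
        return None  # unknown course: treat as missing data
    except ClientError:
        raise  # any other API failure is unexpected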
|
manjunaths/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/cloud/bigquery_reader_ops.py
|
29
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BigQuery reading support for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_cloud_ops
from tensorflow.python.ops import io_ops
class BigQueryReader(io_ops.ReaderBase):
"""A Reader that outputs keys and tf.Example values from a BigQuery table.
Note(1): This op is currently not linked into the binary. It will be linked
by default after more perf testing.
Note(2): This op currently returns example proto as its output. This is not
final and we are experimenting with adding support for returning csv. Support
for example proto may be deprecated after that.
Example use:
```python
# Assume a BigQuery table has the following schema,
# name STRING,
# age INT,
# state STRING
# Create the parse_examples list of features.
features = dict(
name=tf.FixedLenFeature([1], tf.string),
age=tf.FixedLenFeature([1], tf.int64),
state=tf.FixedLenFeature([1], dtype=tf.string, default_value="UNK"))
# Create a Reader.
reader = bigquery_reader_ops.BigQueryReader(project_id=PROJECT,
dataset_id=DATASET,
table_id=TABLE,
timestamp_millis=TIME,
num_partitions=NUM_PARTITIONS,
features=features)
# Populate a queue with the BigQuery Table partitions.
queue = tf.train.string_input_producer(reader.partitions())
# Read and parse examples.
row_id, examples_serialized = reader.read(queue)
examples = tf.parse_example(examples_serialized, features=features)
# Process the Tensors examples["name"], examples["age"], etc...
```
Note that to create a reader a snapshot timestamp is necessary. This
will enable the reader to look at a consistent snapshot of the table.
For more information, see 'Table Decorators' in BigQuery docs.
See ReaderBase for supported methods.
"""
def __init__(self,
project_id,
dataset_id,
table_id,
timestamp_millis,
num_partitions,
features=None,
columns=None,
test_end_point=None,
name=None):
"""Creates a BigQueryReader.
Args:
project_id: GCP project ID.
dataset_id: BigQuery dataset ID.
table_id: BigQuery table ID.
timestamp_millis: timestamp to snapshot the table in milliseconds since
the epoch. Relative (negative or zero) snapshot times are not allowed.
For more details, see 'Table Decorators' in BigQuery docs.
num_partitions: Number of non-overlapping partitions to read from.
features: parse_example compatible dict from keys to `VarLenFeature` and
`FixedLenFeature` objects. Keys are read as columns from the db.
columns: list of columns to read, can be set iff features is None.
test_end_point: Used only for testing purposes (optional).
name: a name for the operation (optional).
Raises:
TypeError: - If features is neither None nor a dict, or
- If columns is neither None nor a list, or
- If both features and columns are None or set.
"""
if (features is None) == (columns is None):
raise TypeError("exactly one of features and columns must be set.")
if features is not None:
if not isinstance(features, dict):
raise TypeError("features must be a dict.")
self._columns = list(features.keys())
elif columns is not None:
if not isinstance(columns, list):
raise TypeError("columns must be a list.")
self._columns = columns
self._project_id = project_id
self._dataset_id = dataset_id
self._table_id = table_id
self._timestamp_millis = timestamp_millis
self._num_partitions = num_partitions
self._test_end_point = test_end_point
reader = gen_cloud_ops.big_query_reader(
name=name,
project_id=self._project_id,
dataset_id=self._dataset_id,
table_id=self._table_id,
timestamp_millis=self._timestamp_millis,
columns=self._columns,
test_end_point=self._test_end_point)
super(BigQueryReader, self).__init__(reader)
def partitions(self, name=None):
"""Returns serialized BigQueryTablePartition messages.
These messages represent a non-overlapping division of a table for a
bulk read.
Args:
name: a name for the operation (optional).
Returns:
`1-D` string `Tensor` of serialized `BigQueryTablePartition` messages.
"""
return gen_cloud_ops.generate_big_query_reader_partitions(
name=name,
project_id=self._project_id,
dataset_id=self._dataset_id,
table_id=self._table_id,
timestamp_millis=self._timestamp_millis,
num_partitions=self._num_partitions,
test_end_point=self._test_end_point,
columns=self._columns)
ops.NotDifferentiable("BigQueryReader")
|
IronLanguages/ironpython3
|
refs/heads/master
|
Src/StdLib/Lib/lib2to3/fixes/fix_reload.py
|
92
|
"""Fixer for reload().
reload(s) -> imp.reload(s)"""
# Local imports
from .. import fixer_base
from ..fixer_util import ImportAndCall, touch_import
class FixReload(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'reload'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
names = ('imp', 'reload')
new = ImportAndCall(node, results, names)
touch_import(None, 'imp', node)
return new
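# Illustrative note (added commentary): for input "reload(mymod)" this fixer
# produces "imp.reload(mymod)", and touch_import adds "import imp" at the top
# of the module when it is not already imported.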
|
vmax-feihu/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-2.0.1/tests/test_request.py
|
47
|
# (c) 2005 Ben Bangert
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from paste.fixture import *
from paste.request import *
from paste.wsgiwrappers import WSGIRequest
import six
def simpleapp(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
request = WSGIRequest(environ)
body = [
'Hello world!\n', 'The get is %s' % str(request.GET),
' and Val is %s\n' % request.GET.get('name'),
'The languages are: %s\n' % request.languages,
'The accepttypes is: %s\n' % request.match_accept(['text/html', 'application/xml'])]
if six.PY3:
body = [line.encode('utf8') for line in body]
return body
def test_gets():
app = TestApp(simpleapp)
res = app.get('/')
assert 'Hello' in res
assert "get is MultiDict([])" in res
res = app.get('/?name=george')
res.mustcontain("get is MultiDict([('name', 'george')])")
res.mustcontain("Val is george")
def test_language_parsing():
app = TestApp(simpleapp)
res = app.get('/')
assert "The languages are: ['en-us']" in res
res = app.get('/', headers={'Accept-Language':'da, en-gb;q=0.8, en;q=0.7'})
assert "languages are: ['da', 'en-gb', 'en', 'en-us']" in res
res = app.get('/', headers={'Accept-Language':'en-gb;q=0.8, da, en;q=0.7'})
assert "languages are: ['da', 'en-gb', 'en', 'en-us']" in res
def test_mime_parsing():
app = TestApp(simpleapp)
res = app.get('/', headers={'Accept':'text/html'})
assert "accepttypes is: ['text/html']" in res
res = app.get('/', headers={'Accept':'application/xml'})
assert "accepttypes is: ['application/xml']" in res
res = app.get('/', headers={'Accept':'application/xml,*/*'})
assert "accepttypes is: ['text/html', 'application/xml']" in res
def test_bad_cookie():
env = {}
env['HTTP_COOKIE'] = '070-it-:><?0'
assert get_cookie_dict(env) == {}
env['HTTP_COOKIE'] = 'foo=bar'
assert get_cookie_dict(env) == {'foo': 'bar'}
env['HTTP_COOKIE'] = '...'
assert get_cookie_dict(env) == {}
env['HTTP_COOKIE'] = '=foo'
assert get_cookie_dict(env) == {}
env['HTTP_COOKIE'] = '?='
assert get_cookie_dict(env) == {}
|
pombredanne/django-jet
|
refs/heads/master
|
jet/tests/test_utils.py
|
5
|
from datetime import datetime, date
import json
from django.contrib.admin import AdminSite
from django.test import TestCase
from jet.tests.models import TestModel
from jet.utils import JsonResponse, get_model_instance_label, get_app_list, get_admin_site, LazyDateTimeEncoder
class UtilsTestCase(TestCase):
def test_json_response(self):
response = JsonResponse({'str': 'string', 'int': 1})
response_dict = json.loads(response.content.decode())
expected_dict = {"int": 1, "str": "string"}
self.assertEqual(response_dict, expected_dict)
self.assertEqual(response.get('Content-Type'), 'application/json')
def test_get_model_instance_label(self):
field1 = 'value'
field2 = 2
pinned_application = TestModel.objects.create(field1=field1, field2=field2)
self.assertEqual(get_model_instance_label(pinned_application), '%s%d' % (field1, field2))
def test_get_app_list(self):
class User:
is_active = True
is_staff = True
def has_module_perms(self, app):
return True
def has_perm(self, object):
return True
class Request:
user = User()
app_list = get_app_list({
'request': Request(),
'user': None
})
self.assertIsInstance(app_list, list)
for app in app_list:
self.assertIsInstance(app, dict)
self.assertIsNotNone(app.get('models'))
self.assertIsNotNone(app.get('app_url'))
self.assertIsNotNone(app.get('app_label'))
for model in app['models']:
self.assertIsNotNone(model.get('object_name'))
self.assertIsNotNone(model.get('name'))
def test_get_admin_site(self):
admin_site = get_admin_site({})
self.assertIsInstance(admin_site, AdminSite)
def test_lazy_date_time_encoder_dates(self):
encoder = LazyDateTimeEncoder()
ts = datetime.now()
self.assertEqual(encoder.encode(ts), '"%s"' % ts.isoformat())
ts = date(2015, 5, 3)
self.assertEqual(encoder.encode(ts), '"%s"' % ts.isoformat())
def test_lazy_date_time_encoder_dict(self):
encoder = LazyDateTimeEncoder()
self.assertEqual(encoder.encode({'key': 1}), '{"key": 1}')
|
Julian24816/lHelper
|
refs/heads/master
|
language/german.py
|
1
|
# coding=utf-8
#
# Copyright (C) 2016 Julian Mueller
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Manages the parsing of German phrases.
"""
from language.abc import Language, Phrase
from typing import List
German = Language("german")
class GermanPhrase(Phrase):
"""
Holds a German phrase.
"""
def __init__(self, phrase_description: str):
super(GermanPhrase, self).__init__(phrase_description, German)
@staticmethod
def parse_phrase(phrase: str):
"""
Parses a phrase string into a German phrase.
:param phrase: the phrase string.
:return: the Phrase
"""
return GermanPhrase(phrase)
@staticmethod
def get_possible_root_forms_for(string: str) -> List[str]:
"""
Returns all root forms that belong to words which could, when inflected, result in the given string.
:param string: the string to find root forms for
:return: a list of strings
"""
return [string]
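# Hedged usage sketch (illustrative addition): parsing is intentionally
# trivial for German at this stage, so both calls simply echo their input.
def _example_usage():
    phrase = GermanPhrase.parse_phrase("das Haus")
    roots = GermanPhrase.get_possible_root_forms_for("Haus")  # ["Haus"]
    return phrase, roots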
|
yinquan529/platform-external-chromium_org-tools-gyp
|
refs/heads/master
|
tools/pretty_sln.py
|
806
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
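# Illustrative note (added commentary): BuildProject is a depth-first
# topological emit -- a project is printed only after all of its
# dependencies, with `built` doubling as the visited set. A dependency cycle
# would recurse without bound, as the comment above warns.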
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
'}"\) = "(.*)", "(.*)", "(.*)"$'))
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
# Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
# check if we have exactly 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
vingkan/MebiPenny2012Finals
|
refs/heads/master
|
2011-mebipenny/contest/spelunking/spelunking.py
|
7
|
#!/usr/bin/env python
import sys, re, random
from collections import defaultdict
INFINITY = float('inf')
class Graph(object):
def __init__(self):
self.edges = defaultdict(lambda: set())
self.capacities = defaultdict(lambda: 0)
def copy(self):
new_g = Graph()
new_g.edges = self.edges.copy()
new_g.capacities = self.capacities.copy()
return new_g
def addEdge(self, vertex1, vertex2, capacity):
self.edges[vertex1].add(vertex2)
self.edges[vertex2].add(vertex1)
self.capacities[(vertex1, vertex2)] = capacity
def __str__(self):
return str((self.edges, self.capacities))
def findPath(self, s, t, flow):
parent = defaultdict(lambda: -1)
parent[s] = -2
bottleneck = defaultdict(lambda: INFINITY)
queue = []
queue.append(s)
while queue:
u = queue.pop()
for v in self.edges[u]:
current_capacity = self.capacities[(u, v)] - flow[(u, v)]
if current_capacity <= 0: continue
if parent[v] != -1: continue
parent[v] = u
bottleneck[v] = min(bottleneck[u], current_capacity)
if v != t:
queue.append(v)
else:
path = [t]
while path[0] != s:
path = [parent[path[0]]] + path
return path, bottleneck[t]
return None, None
def maxFlow(self, s, t):
flow = defaultdict(lambda: 0)
while True:
path, bottleneck = self.findPath(s, t, flow)
if path is None: break
assert bottleneck < INFINITY
prev = s
i = 1
while i < len(path):
flow[path[i-1], path[i]] += bottleneck
flow[path[i], path[i-1]] -= bottleneck
i += 1
return sum(flow[(s,v)] for v in self.edges[s])
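# Hedged, illustrative sketch (added commentary, not part of the contest
# entry): a minimal standalone use of the Ford-Fulkerson-style Graph above;
# nothing in this script calls it.
def _example_max_flow():
    g = Graph()
    g.addEdge(1, 2, 3)
    g.addEdge(2, 3, 2)
    return g.maxFlow(1, 3)  # 2: the 2-unit edge bottlenecks the only path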
def int_stream(datastream):
whitespace = re.compile(r'\s+')
for line in datastream:
chunks = whitespace.split(line)
for chunk in chunks:
if not chunk: continue
yield int(chunk)
def random_int_stream(first, a, b):
for thing in first: yield thing
while True:
yield random.randint(a, b)
def main(argv):
ints = int_stream(sys.stdin)
cave_count = ints.next()
corridor_count = ints.next()
g = Graph()
for _ in xrange(corridor_count):
a, b, c = ints.next(), ints.next(), ints.next()
g.addEdge(a, b, c)
print g.maxFlow(1, cave_count)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
z25/BgeHttpServer
|
refs/heads/master
|
webob/headers.py
|
78
|
from collections import MutableMapping
from webob.compat import (
iteritems_,
string_types,
)
from webob.multidict import MultiDict
__all__ = ['ResponseHeaders', 'EnvironHeaders']
class ResponseHeaders(MultiDict):
"""
Dictionary view on the response headerlist.
Keys are normalized for case and whitespace.
"""
def __getitem__(self, key):
key = key.lower()
for k, v in reversed(self._items):
if k.lower() == key:
return v
raise KeyError(key)
def getall(self, key):
key = key.lower()
result = []
for k, v in self._items:
if k.lower() == key:
result.append(v)
return result
def mixed(self):
r = self.dict_of_lists()
for key, val in iteritems_(r):
if len(val) == 1:
r[key] = val[0]
return r
def dict_of_lists(self):
r = {}
for key, val in iteritems_(self):
r.setdefault(key.lower(), []).append(val)
return r
def __setitem__(self, key, value):
norm_key = key.lower()
items = self._items
for i in range(len(items)-1, -1, -1):
if items[i][0].lower() == norm_key:
del items[i]
self._items.append((key, value))
def __delitem__(self, key):
key = key.lower()
items = self._items
found = False
for i in range(len(items)-1, -1, -1):
if items[i][0].lower() == key:
del items[i]
found = True
if not found:
raise KeyError(key)
def __contains__(self, key):
key = key.lower()
for k, v in self._items:
if k.lower() == key:
return True
return False
has_key = __contains__
def setdefault(self, key, default=None):
c_key = key.lower()
for k, v in self._items:
if k.lower() == c_key:
return v
self._items.append((key, default))
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError("pop expected at most 2 arguments, got %s"
% repr(1 + len(args)))
key = key.lower()
for i in range(len(self._items)):
if self._items[i][0].lower() == key:
v = self._items[i][1]
del self._items[i]
return v
if args:
return args[0]
else:
raise KeyError(key)
key2header = {
'CONTENT_TYPE': 'Content-Type',
'CONTENT_LENGTH': 'Content-Length',
'HTTP_CONTENT_TYPE': 'Content_Type',
'HTTP_CONTENT_LENGTH': 'Content_Length',
}
header2key = dict([(v.upper(),k) for (k,v) in key2header.items()])
def _trans_key(key):
if not isinstance(key, string_types):
return None
elif key in key2header:
return key2header[key]
elif key.startswith('HTTP_'):
return key[5:].replace('_', '-').title()
else:
return None
def _trans_name(name):
name = name.upper()
if name in header2key:
return header2key[name]
return 'HTTP_'+name.replace('-', '_')
class EnvironHeaders(MutableMapping):
"""An object that represents the headers as present in a
WSGI environment.
This object is a wrapper (with no internal state) for a WSGI
request object, representing the CGI-style HTTP_* keys as a
dictionary. Because a CGI environment can only hold one value for
each key, this dictionary is single-valued (unlike outgoing
headers).
"""
def __init__(self, environ):
self.environ = environ
def __getitem__(self, hname):
return self.environ[_trans_name(hname)]
def __setitem__(self, hname, value):
self.environ[_trans_name(hname)] = value
def __delitem__(self, hname):
del self.environ[_trans_name(hname)]
def keys(self):
return filter(None, map(_trans_key, self.environ))
def __contains__(self, hname):
return _trans_name(hname) in self.environ
def __len__(self):
return len(list(self.keys()))
def __iter__(self):
for k in self.keys():
yield k
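# Hedged usage sketch (illustrative addition): EnvironHeaders presents the
# CGI-style keys of a WSGI environ under their usual header names.
def _example_environ_headers():
    environ = {'HTTP_USER_AGENT': 'curl/7.35.0', 'CONTENT_TYPE': 'text/plain'}
    headers = EnvironHeaders(environ)
    # 'User-Agent' maps to HTTP_USER_AGENT via _trans_name; Content-Type is
    # special-cased through key2header/header2key.
    return headers['User-Agent'], headers['Content-Type']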
|
jostep/tensorflow
|
refs/heads/master
|
tensorflow/contrib/seq2seq/python/ops/__init__.py
|
12133432
| |
mrrrgn/olympia
|
refs/heads/master
|
apps/files/__init__.py
|
12133432
| |
msebire/intellij-community
|
refs/heads/master
|
python/testData/testRunner/env/createConfigurationTest/folder_with_word_tests_in_name/__init__.py
|
12133432
| |
frankvdp/django
|
refs/heads/master
|
django/conf/locale/es/__init__.py
|
12133432
| |
pigeonflight/strider-plone
|
refs/heads/master
|
docker/appengine/lib/django-1.2/tests/regressiontests/model_formsets_regress/__init__.py
|
12133432
| |
fxfitz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/netvisor/__init__.py
|
12133432
| |
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pywin32-219/com/win32com/client/makepy.py
|
16
|
# Originally written by Curt Hagenlocher, and various bits
# and pieces by Mark Hammond (and now Greg Stein has had
# a go too :-)
# Note that the main worker code has been moved to genpy.py
# As this is normally run from the command line, it reparses the code each time.
# Now this is nothing more than the command line handler and public interface.
# XXX - TO DO
# XXX - Greg and Mark have some ideas for a revamp - just no
# time - if you want to help, contact us for details.
# Main idea is to drop the classes exported and move to a more
# traditional data driven model.
"""Generate a .py file from an OLE TypeLibrary file.
This module is concerned only with the actual writing of
a .py file. It draws on the @build@ module, which builds
the knowledge of a COM interface.
"""
usageHelp = """ \
Usage:
makepy.py [-i] [-v|q] [-h] [-u] [-o output_file] [-d] [typelib, ...]
-i -- Show information for the specified typelib.
-v -- Verbose output.
-q -- Quiet output.
-h -- Do not generate hidden methods.
-u -- Python 1.5 and earlier: Do NOT convert all Unicode objects to
strings.
Python 1.6 and later: Convert all Unicode objects to strings.
-o -- Create output in a specified output file. If the path leading
to the file does not exist, any missing directories will be
created.
NOTE: -o cannot be used with -d. This will generate an error.
-d -- Generate the base code now and the class code on demand.
Recommended for large type libraries.
typelib -- A TLB, DLL, OCX or anything containing COM type information.
If a typelib is not specified, a window containing a textbox
will open from which you can select a registered type
library.
Examples:
makepy.py -d
Presents a list of registered type libraries from which you can make
a selection.
makepy.py -d "Microsoft Excel 8.0 Object Library"
Generate support for the type library with the specified description
(in this case, the MS Excel object model).
"""
import sys, os, pythoncom
from win32com.client import genpy, selecttlb, gencache
from win32com.client import Dispatch
bForDemandDefault = 0 # Default value of bForDemand - toggle this to change the world - see also gencache.py
error = "makepy.error"
def usage():
sys.stderr.write (usageHelp)
sys.exit(2)
def ShowInfo(spec):
if not spec:
tlbSpec = selecttlb.SelectTlb(excludeFlags=selecttlb.FLAG_HIDDEN)
if tlbSpec is None:
return
try:
tlb = pythoncom.LoadRegTypeLib(tlbSpec.clsid, tlbSpec.major, tlbSpec.minor, tlbSpec.lcid)
except pythoncom.com_error: # May be badly registered.
sys.stderr.write("Warning - could not load registered typelib '%s'\n" % (tlbSpec.clsid))
tlb = None
infos = [(tlb, tlbSpec)]
else:
infos = GetTypeLibsForSpec(spec)
for (tlb, tlbSpec) in infos:
desc = tlbSpec.desc
if desc is None:
if tlb is None:
desc = "<Could not load typelib %s>" % (tlbSpec.dll)
else:
desc = tlb.GetDocumentation(-1)[0]
print desc
print " %s, lcid=%s, major=%s, minor=%s" % (tlbSpec.clsid, tlbSpec.lcid, tlbSpec.major, tlbSpec.minor)
print " >>> # Use these commands in Python code to auto generate .py support"
print " >>> from win32com.client import gencache"
print " >>> gencache.EnsureModule('%s', %s, %s, %s)" % (tlbSpec.clsid, tlbSpec.lcid, tlbSpec.major, tlbSpec.minor)
class SimpleProgress(genpy.GeneratorProgress):
"""A simple progress class prints its output to stderr
"""
def __init__(self, verboseLevel):
self.verboseLevel = verboseLevel
def Close(self):
pass
def Finished(self):
if self.verboseLevel>1:
sys.stderr.write("Generation complete..\n")
def SetDescription(self, desc, maxticks = None):
if self.verboseLevel:
sys.stderr.write(desc + "\n")
def Tick(self, desc = None):
pass
def VerboseProgress(self, desc, verboseLevel = 2):
if self.verboseLevel >= verboseLevel:
sys.stderr.write(desc + "\n")
def LogBeginGenerate(self, filename):
self.VerboseProgress("Generating to %s" % filename, 1)
def LogWarning(self, desc):
self.VerboseProgress("WARNING: " + desc, 1)
class GUIProgress(SimpleProgress):
def __init__(self, verboseLevel):
# Import some modules we need so we can trap failure now.
import win32ui, pywin
SimpleProgress.__init__(self, verboseLevel)
self.dialog = None
def Close(self):
if self.dialog is not None:
self.dialog.Close()
self.dialog = None
def Starting(self, tlb_desc):
SimpleProgress.Starting(self, tlb_desc)
if self.dialog is None:
from pywin.dialogs import status
self.dialog=status.ThreadedStatusProgressDialog(tlb_desc)
else:
self.dialog.SetTitle(tlb_desc)
def SetDescription(self, desc, maxticks = None):
self.dialog.SetText(desc)
if maxticks:
self.dialog.SetMaxTicks(maxticks)
def Tick(self, desc = None):
self.dialog.Tick()
if desc is not None:
self.dialog.SetText(desc)
def GetTypeLibsForSpec(arg):
"""Given an argument on the command line (either a file name, library
description, or ProgID of an object) return a list of actual typelibs
to use. """
typelibs = []
try:
try:
tlb = pythoncom.LoadTypeLib(arg)
spec = selecttlb.TypelibSpec(None, 0,0,0)
spec.FromTypelib(tlb, arg)
typelibs.append((tlb, spec))
except pythoncom.com_error:
# See if it is a description
tlbs = selecttlb.FindTlbsWithDescription(arg)
if len(tlbs)==0:
# Maybe it is the name of a COM object?
try:
ob = Dispatch(arg)
# and if so, it must support typelib info
tlb, index = ob._oleobj_.GetTypeInfo().GetContainingTypeLib()
spec = selecttlb.TypelibSpec(None, 0,0,0)
spec.FromTypelib(tlb)
tlbs.append(spec)
except pythoncom.com_error:
pass
if len(tlbs)==0:
print "Could not locate a type library matching '%s'" % (arg)
for spec in tlbs:
# Version numbers not always reliable if enumerated from registry.
# (as some libs use hex, others don't. Both examples from MS, of course.)
if spec.dll is None:
tlb = pythoncom.LoadRegTypeLib(spec.clsid, spec.major, spec.minor, spec.lcid)
else:
tlb = pythoncom.LoadTypeLib(spec.dll)
# We have a typelib, but it may not be exactly what we specified
# (due to automatic version matching of COM). So we query what we really have!
attr = tlb.GetLibAttr()
spec.major = attr[3]
spec.minor = attr[4]
spec.lcid = attr[1]
typelibs.append((tlb, spec))
return typelibs
except pythoncom.com_error:
t,v,tb=sys.exc_info()
sys.stderr.write ("Unable to load type library from '%s' - %s\n" % (arg, v))
tb = None # Storing tb in a local is a cycle!
sys.exit(1)
def GenerateFromTypeLibSpec(typelibInfo, file = None, verboseLevel = None, progressInstance = None, bUnicodeToString=None, bForDemand = bForDemandDefault, bBuildHidden = 1):
assert bUnicodeToString is None, "this is deprecated and will go away"
if verboseLevel is None:
verboseLevel = 0 # By default, we use no gui and no verbose level!
if bForDemand and file is not None:
raise RuntimeError("You can only perform a demand-build when the output goes to the gen_py directory")
if isinstance(typelibInfo, tuple):
# Tuple
typelibCLSID, lcid, major, minor = typelibInfo
tlb = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid)
spec = selecttlb.TypelibSpec(typelibCLSID, lcid, major, minor)
spec.FromTypelib(tlb, str(typelibCLSID))
typelibs = [(tlb, spec)]
elif isinstance(typelibInfo, selecttlb.TypelibSpec):
if typelibInfo.dll is None:
# Version numbers not always reliable if enumerated from registry.
tlb = pythoncom.LoadRegTypeLib(typelibInfo.clsid, typelibInfo.major, typelibInfo.minor, typelibInfo.lcid)
else:
tlb = pythoncom.LoadTypeLib(typelibInfo.dll)
typelibs = [(tlb, typelibInfo)]
elif hasattr(typelibInfo, "GetLibAttr"):
# A real typelib object!
# Could also use isinstance(typelibInfo, PyITypeLib) instead, but PyITypeLib is not directly exposed by pythoncom.
# pythoncom.TypeIIDs[pythoncom.IID_ITypeLib] seems to work
tla = typelibInfo.GetLibAttr()
guid = tla[0]
lcid = tla[1]
major = tla[3]
minor = tla[4]
spec = selecttlb.TypelibSpec(guid, lcid, major, minor)
typelibs = [(typelibInfo, spec)]
else:
typelibs = GetTypeLibsForSpec(typelibInfo)
if progressInstance is None:
progressInstance = SimpleProgress(verboseLevel)
progress = progressInstance
bToGenDir = (file is None)
for typelib, info in typelibs:
gen = genpy.Generator(typelib, info.dll, progress, bBuildHidden=bBuildHidden)
if file is None:
this_name = gencache.GetGeneratedFileName(info.clsid, info.lcid, info.major, info.minor)
full_name = os.path.join(gencache.GetGeneratePath(), this_name)
if bForDemand:
try: os.unlink(full_name + ".py")
except os.error: pass
try: os.unlink(full_name + ".pyc")
except os.error: pass
try: os.unlink(full_name + ".pyo")
except os.error: pass
if not os.path.isdir(full_name):
os.mkdir(full_name)
outputName = os.path.join(full_name, "__init__.py")
else:
outputName = full_name + ".py"
fileUse = gen.open_writer(outputName)
progress.LogBeginGenerate(outputName)
else:
fileUse = file
worked = False
try:
gen.generate(fileUse, bForDemand)
worked = True
finally:
if file is None:
gen.finish_writer(outputName, fileUse, worked)
if bToGenDir:
progress.SetDescription("Importing module")
gencache.AddModuleToCache(info.clsid, info.lcid, info.major, info.minor)
progress.Close()
def GenerateChildFromTypeLibSpec(child, typelibInfo, verboseLevel = None, progressInstance = None, bUnicodeToString=None):
assert bUnicodeToString is None, "this is deprecated and will go away"
if verboseLevel is None:
verboseLevel = 0 # By default, we use no gui, and no verbose level for the children.
if type(typelibInfo)==type(()):
typelibCLSID, lcid, major, minor = typelibInfo
tlb = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid)
else:
tlb = typelibInfo
tla = typelibInfo.GetLibAttr()
typelibCLSID = tla[0]
lcid = tla[1]
major = tla[3]
minor = tla[4]
spec = selecttlb.TypelibSpec(typelibCLSID, lcid, major, minor)
spec.FromTypelib(tlb, str(typelibCLSID))
typelibs = [(tlb, spec)]
if progressInstance is None:
progressInstance = SimpleProgress(verboseLevel)
progress = progressInstance
for typelib, info in typelibs:
dir_name = gencache.GetGeneratedFileName(info.clsid, info.lcid, info.major, info.minor)
dir_path_name = os.path.join(gencache.GetGeneratePath(), dir_name)
progress.LogBeginGenerate(dir_path_name)
gen = genpy.Generator(typelib, info.dll, progress)
gen.generate_child(child, dir_path_name)
progress.SetDescription("Importing module")
__import__("win32com.gen_py." + dir_name + "." + child)
progress.Close()
def main():
import getopt
hiddenSpec = 1
outputName = None
verboseLevel = 1
doit = 1
bForDemand = bForDemandDefault
try:
opts, args = getopt.getopt(sys.argv[1:], 'vo:huiqd')
for o,v in opts:
if o=='-h':
hiddenSpec = 0
elif o=='-o':
outputName = v
elif o=='-v':
verboseLevel = verboseLevel + 1
elif o=='-q':
verboseLevel = verboseLevel - 1
elif o=='-i':
if len(args)==0:
ShowInfo(None)
else:
for arg in args:
ShowInfo(arg)
doit = 0
elif o=='-d':
bForDemand = not bForDemand
except (getopt.error, error) as msg:
sys.stderr.write(str(msg) + "\n")
usage()
if bForDemand and outputName is not None:
sys.stderr.write("Can not use -d and -o together\n")
usage()
if not doit:
return 0
if len(args)==0:
rc = selecttlb.SelectTlb()
if rc is None:
sys.exit(1)
args = [ rc ]
if outputName is not None:
path = os.path.dirname(outputName)
if path != '' and not os.path.exists(path):
os.makedirs(path)
if sys.version_info > (3,0):
f = open(outputName, "wt", encoding="mbcs")
else:
import codecs # py2 fallback: built-in open() has no encoding argument.
f = codecs.open(outputName, "w", "mbcs")
else:
f = None
for arg in args:
GenerateFromTypeLibSpec(arg, f, verboseLevel = verboseLevel, bForDemand = bForDemand, bBuildHidden = hiddenSpec)
if f:
f.close()
if __name__=='__main__':
rc = main()
if rc:
sys.exit(rc)
sys.exit(0)
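# Illustrative usage sketch (assumes pywin32 is installed and the named type
# library is registered on the machine; the library name is just an example):
#
#   from win32com.client import makepy
#   # Generate and cache the Python wrapper for a registered typelib,
#   # exactly as running makepy.py from the command line would.
#   makepy.GenerateFromTypeLibSpec("Microsoft Scripting Runtime", verboseLevel=1)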
|
geometrybase/gensim
|
refs/heads/develop
|
gensim/parsing/porter.py
|
86
|
#!/usr/bin/env python
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be regarded
as canonical, in that it follows the algorithm presented in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
only differing from it at the points marked --DEPARTURE-- below.
See also http://www.tartarus.org/~martin/PorterStemmer
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
because (a) the points of DEPARTURE are definitely improvements, and
(b) no encoding of the Porter stemmer I have seen is anything like
as exact as this version, even with the points of DEPARTURE!
Vivake Gupta (v@nano.com)
Release 1: January 2001
Further adjustments by Santiago Bruno (bananabruno@gmail.com)
to allow word input not restricted to one word per line, leading
to:
Release 2: July 2008
Optimizations and cleanup of the code by Lars Buitinck, July 2012.
"""
from six.moves import xrange
class PorterStemmer(object):
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[0],
b[1] ... ending at b[k]. k is readjusted downwards as the stemming
progresses.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.j = 0 # j is a general offset into the string
def _cons(self, i):
"""True <=> b[i] is a consonant."""
ch = self.b[i]
if ch in "aeiou":
return False
if ch == 'y':
return i == 0 or not self._cons(i - 1)
return True
def _m(self):
"""Returns the number of consonant sequences between 0 and j.
If c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
i = 0
while True:
if i > self.j:
return 0
if not self._cons(i):
break
i += 1
i += 1
n = 0
while True:
while True:
if i > self.j:
return n
if self._cons(i):
break
i += 1
i += 1
n += 1
while True:
if i > self.j:
return n
if not self._cons(i):
break
i += 1
i += 1
def _vowelinstem(self):
"""True <=> 0,...j contains a vowel"""
return not all(self._cons(i) for i in xrange(self.j + 1))
def _doublec(self, j):
"""True <=> j,(j-1) contain a double consonant."""
return j > 0 and self.b[j] == self.b[j-1] and self._cons(j)
def _cvc(self, i):
"""True <=> i-2,i-1,i has the form consonant - vowel - consonant
and also if the second c is not w,x or y. This is used when trying to
restore an e at the end of a short word, e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i < 2 or not self._cons(i) or self._cons(i-1) or not self._cons(i-2):
return False
return self.b[i] not in "wxy"
def _ends(self, s):
"""True <=> 0,...k ends with the string s."""
if s[-1] != self.b[self.k]: # tiny speed-up
return 0
length = len(s)
if length > (self.k + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1
def _setto(self, s):
"""Set (j+1),...k to the characters in the string s, adjusting k."""
self.b = self.b[:self.j+1] + s
self.k = len(self.b) - 1
def _r(self, s):
if self._m() > 0:
self._setto(s)
def _step1ab(self):
"""Get rid of plurals and -ed or -ing. E.g.,
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self._ends("sses"):
self.k -= 2
elif self._ends("ies"):
self._setto("i")
elif self.b[self.k - 1] != 's':
self.k -= 1
if self._ends("eed"):
if self._m() > 0:
self.k -= 1
elif (self._ends("ed") or self._ends("ing")) and self._vowelinstem():
self.k = self.j
if self._ends("at"): self._setto("ate")
elif self._ends("bl"): self._setto("ble")
elif self._ends("iz"): self._setto("ize")
elif self._doublec(self.k):
if self.b[self.k - 1] not in "lsz":
self.k -= 1
elif self._m() == 1 and self._cvc(self.k):
self._setto("e")
def _step1c(self):
"""Turn terminal y to i when there is another vowel in the stem."""
if self._ends("y") and self._vowelinstem():
self.b = self.b[:self.k] + 'i'
def _step2(self):
"""Map double suffices to single ones.
So, -ization ( = -ize plus -ation) maps to -ize etc. Note that the
string before the suffix must give _m() > 0.
"""
ch = self.b[self.k - 1]
if ch == 'a':
if self._ends("ational"): self._r("ate")
elif self._ends("tional"): self._r("tion")
elif ch == 'c':
if self._ends("enci"): self._r("ence")
elif self._ends("anci"): self._r("ance")
elif ch == 'e':
if self._ends("izer"): self._r("ize")
elif ch == 'l':
if self._ends("bli"): self._r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self._ends("abli"): self._r("able")
elif self._ends("alli"): self._r("al")
elif self._ends("entli"): self._r("ent")
elif self._ends("eli"): self._r("e")
elif self._ends("ousli"): self._r("ous")
elif ch == 'o':
if self._ends("ization"): self._r("ize")
elif self._ends("ation"): self._r("ate")
elif self._ends("ator"): self._r("ate")
elif ch == 's':
if self._ends("alism"): self._r("al")
elif self._ends("iveness"): self._r("ive")
elif self._ends("fulness"): self._r("ful")
elif self._ends("ousness"): self._r("ous")
elif ch == 't':
if self._ends("aliti"): self._r("al")
elif self._ends("iviti"): self._r("ive")
elif self._ends("biliti"): self._r("ble")
elif ch == 'g': # --DEPARTURE--
if self._ends("logi"): self._r("log")
# To match the published algorithm, delete this phrase
def _step3(self):
"""Deal with -ic-, -full, -ness etc. Similar strategy to _step2."""
ch = self.b[self.k]
if ch == 'e':
if self._ends("icate"): self._r("ic")
elif self._ends("ative"): self._r("")
elif self._ends("alize"): self._r("al")
elif ch == 'i':
if self._ends("iciti"): self._r("ic")
elif ch == 'l':
if self._ends("ical"): self._r("ic")
elif self._ends("ful"): self._r("")
elif ch == 's':
if self._ends("ness"): self._r("")
def _step4(self):
"""_step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
ch = self.b[self.k - 1]
if ch == 'a':
if not self._ends("al"): return
elif ch == 'c':
if not self._ends("ance") and not self._ends("ence"): return
elif ch == 'e':
if not self._ends("er"): return
elif ch == 'i':
if not self._ends("ic"): return
elif ch == 'l':
if not self._ends("able") and not self._ends("ible"): return
elif ch == 'n':
if self._ends("ant"): pass
elif self._ends("ement"): pass
elif self._ends("ment"): pass
elif self._ends("ent"): pass
else: return
elif ch == 'o':
if self._ends("ion") and self.b[self.j] in "st": pass
elif self._ends("ou"): pass
# takes care of -ous
else: return
elif ch == 's':
if not self._ends("ism"): return
elif ch == 't':
if not self._ends("ate") and not self._ends("iti"): return
elif ch == 'u':
if not self._ends("ous"): return
elif ch == 'v':
if not self._ends("ive"): return
elif ch == 'z':
if not self._ends("ize"): return
else:
return
if self._m() > 1:
self.k = self.j
def _step5(self):
"""Remove a final -e if _m() > 1, and change -ll to -l if m() > 1.
"""
k = self.j = self.k
if self.b[k] == 'e':
a = self._m()
if a > 1 or (a == 1 and not self._cvc(k - 1)):
self.k -= 1
if self.b[self.k] == 'l' and self._doublec(self.k) and self._m() > 1:
self.k -= 1
def stem(self, w):
"""Stem the word w, return the stemmed form."""
w = w.lower()
k = len(w) - 1
if k <= 1:
return w # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.b = w
self.k = k
self._step1ab()
self._step1c()
self._step2()
self._step3()
self._step4()
self._step5()
return self.b[:self.k+1]
def stem_sentence(self, txt):
return " ".join(map(self.stem, txt.split()))
def stem_documents(self, docs):
return map(self.stem_sentence, docs)
if __name__ == '__main__':
import sys
p = PorterStemmer()
for f in sys.argv[1:]:
with open(f) as infile:
for line in infile:
print(p.stem_sentence(line))
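# Illustrative usage sketch (expected outputs taken from the _step1ab
# docstring above):
#
#   p = PorterStemmer()
#   p.stem("caresses")                  # -> 'caress'
#   p.stem("ponies")                    # -> 'poni'
#   p.stem_sentence("matting meetings") # -> 'mat meet'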
|
adelina-t/nova
|
refs/heads/master
|
nova/api/openstack/compute/plugins/v3/flavor_rxtx.py
|
36
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Rxtx API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = 'os-flavor-rxtx'
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class FlavorRxtxController(wsgi.Controller):
def _extend_flavors(self, req, flavors):
for flavor in flavors:
db_flavor = req.get_db_flavor(flavor['id'])
key = 'rxtx_factor'
flavor[key] = db_flavor['rxtx_factor'] or ""
def _show(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
if 'flavor' in resp_obj.obj:
self._extend_flavors(req, [resp_obj.obj['flavor']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends(action='create')
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
self._extend_flavors(req, list(resp_obj.obj['flavors']))
class FlavorRxtx(extensions.V3APIExtensionBase):
"""Support to show the rxtx status of a flavor."""
name = "FlavorRxtx"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = FlavorRxtxController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
def get_resources(self):
return []
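# Illustrative effect (a sketch; field values are invented): with this
# extension loaded, a flavor in a show/detail response gains the factor, e.g.
#   {"flavor": {"id": "1", "name": "m1.tiny", "rxtx_factor": 1.0}}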
|
vmindru/ansible
|
refs/heads/devel
|
lib/ansible/plugins/doc_fragments/vultr.py
|
36
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
api_key:
description:
- API key of the Vultr API.
- The ENV variable C(VULTR_API_KEY) is used as default, when defined.
api_timeout:
description:
- HTTP timeout to Vultr API.
- The ENV variable C(VULTR_API_TIMEOUT) is used as default, when defined.
- Fallback value is 60 seconds if not specified.
api_retries:
description:
- Number of retries in case the Vultr API returns an HTTP 503 code.
- The ENV variable C(VULTR_API_RETRIES) is used as default, when defined.
- Fallback value is 5 retries if not specified.
api_account:
description:
- Name of the ini section in the C(vultr.ini) file.
- The ENV variable C(VULTR_API_ACCOUNT) is used as default, when defined.
default: default
api_endpoint:
description:
- URL to the API endpoint (without trailing slash).
- The ENV variable C(VULTR_API_ENDPOINT) is used as default, when defined.
- Fallback value is U(https://api.vultr.com) if not specified.
validate_certs:
description:
- Validate SSL certs of the Vultr API.
default: yes
type: bool
requirements:
- "python >= 2.6"
notes:
- Also see the API documentation on https://www.vultr.com/api/.
'''
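# Illustrative sketch of the fallback order described above ('params' is a
# hypothetical dict standing in for parsed module arguments):
#
#   import os
#   api_key = params.get('api_key') or os.environ.get('VULTR_API_KEY')
#   api_timeout = params.get('api_timeout') or os.environ.get('VULTR_API_TIMEOUT') or 60
#   api_retries = params.get('api_retries') or os.environ.get('VULTR_API_RETRIES') or 5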
|
magvugr/AT
|
refs/heads/master
|
EntVirtual/lib/python2.7/site-packages/django/views/debug.py
|
20
|
from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.urls import Resolver404, resolve
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
CLEANSED_SUBSTITUTE = '********************'
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
* Not to break the debug page if the callable forbidding to set attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
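# Illustrative behaviour of cleanse_setting (a sketch; values are invented):
#   cleanse_setting('SECRET_KEY', 'abc123') -> '********************'
#   cleanse_setting('DEBUG', True) -> True
#   cleanse_setting('DATABASES', {'PASSWORD': 'pw'})
#       -> {'PASSWORD': '********************'}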
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the values of parameters marked as sensitive in a MultiValueDict with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
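# Illustrative behaviour (a sketch; parameter names are invented): with
#   request.sensitive_post_parameters = ['password']
# a POST of {'user': 'bob', 'password': 'hunter2'} is reported as
#   {'user': 'bob', 'password': '********************'};
# the special value '__ALL__' cleanses every parameter instead.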
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. The isinstance check also ensures that a
# lazy MultiValueDict is resolved to a concrete value.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
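# Illustrative usage of the decorator this filter looks for (a sketch; the
# decorator is provided by django.views.decorators.debug):
#
#   from django.views.decorators.debug import sensitive_variables
#
#   @sensitive_variables('password', 'credit_card')
#   def process_payment(user, password, credit_card):
#       ...  # on error, both locals show up as '********************'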
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
# The force_escape filter assumes unicode; make sure that holds
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if six.PY2:
tb = tb.tb_next
elif not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
lines = ['Traceback (most recent call last):\n']
lines += traceback.format_list(tb)
lines += traceback.format_exception_only(self.exc_type, self.exc_value)
return lines
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried or ( # empty URLconf
request.path == '/' and
len(tried) == 1 and # default URLconf
len(tried[0]) == 1 and
getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
)):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _(
"Of course, you haven't actually done any work yet. "
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."
),
"explanation": _(
"You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"
),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.textContent = s.textContent == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.textContent = link.textContent.trim() == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.get_raw_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% if template_info.bottom != template_info.total %} cut-bottom{% endif %}">
{% for source_line in template_info.source_lines %}
{% if source_line.0 == template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}"""
"""<span class="specific">{{ template_info.during }}</span>"""
"""{{ template_info.after }}</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endif %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
""" """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:0 %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}"""
"{% for source_line in template_info.source_lines %}"
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public website">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
{% if request.user %}
<h3 id="user-info">USER</h3>
<p>{{ request.user }}</p>
{% endif %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:0 %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""") # NOQA
TECHNICAL_500_TEXT_TEMPLATE = (""""""
"""{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.get_raw_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{% if settings.MIDDLEWARE is not None %}{{ settings.MIDDLEWARE|pprint }}"""
"""{% else %}{{ settings.MIDDLEWARE_CLASSES|pprint }}{% endif %}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %}"""
""" * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}
{% for source_line in template_info.source_lines %}"""
"{% if source_line.0 == template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endif %}{% endfor %}{% endif %}{% if frames %}
Traceback:"""
"{% for frame in frames %}"
"{% ifchanged frame.exc_cause %}"
" {% if frame.exc_cause %}" """
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
{% if request.user %}USER: {{ request.user }}{% endif %}
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:0 %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% if not is_email %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
{% endif %}
""") # NOQA
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
|
fulfilio/trytond-shipping-ups
|
refs/heads/develop
|
docs/conf.py
|
3
|
# -*- coding: utf-8 -*-
#
# Tryton Shipping UPS documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 8 12:01:12 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tryton Shipping UPS'
copyright = u'2015, Fulfil.io Inc., 2015, Openlabs'
author = u'Fulfil.io'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'VERSION'
# The full version, including alpha/beta/rc tags.
release = 'VERSION'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
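# Note: the Read the Docs theme ships as a separate package; a minimal setup
# sketch (assumes pip is available):
#   pip install sphinx_rtd_theme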
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TrytonShippingUPSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TrytonShippingUPS.tex', u'Tryton Shipping UPS Documentation',
u'Openlabs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'trytonshippingups', u'Tryton Shipping UPS Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TrytonShippingUPS', u'Tryton Shipping UPS Documentation',
author, 'TrytonShippingUPS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
smandy/d_c_experiment
|
refs/heads/master
|
scons-local-2.3.4/SCons/Options/ListOption.py
|
9
|
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/ListOption.py 2014/09/27 12:51:43 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def ListOption(*args, **kw):
global warned
if not warned:
msg = "The ListOption() function is deprecated; use the ListVariable() function instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
return SCons.Variables.ListVariable(*args, **kw)
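# Illustrative usage sketch (not part of the original module): the shim above
# simply forwards to the new Variables API, so the two calls below are
# equivalent apart from the one-time DeprecatedOptionsWarning:
#
#   opt = ListOption('features', 'features to enable', 'none', ['ssl', 'zlib'])
#   var = SCons.Variables.ListVariable('features', 'features to enable',
#                                      'none', ['ssl', 'zlib'])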
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
rajendrauppal/coding-interview
|
refs/heads/master
|
programming_languages/Python/lambda.py
|
1
|
import math
def sqroot(x):
return math.sqrt(x)
square_root = lambda x: math.sqrt(x)
print(sqroot(65))
print(square_root(65))
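# Both calls above print the same value (sqrt(65) ~= 8.0623): a lambda is just
# an anonymous function object, so binding it to square_root is equivalent to
# the def statement, except that square_root.__name__ is '<lambda>' while
# sqroot.__name__ is 'sqroot'.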
import Tkinter as tk
class App:
def __init__(self, parent):
frame = tk.Frame(parent)
frame.pack()
btn22 = tk.Button(frame, text="22", command=lambda: self.print_num(22))
btn22.pack(side=tk.LEFT)
btn44 = tk.Button(frame, text="44", command=lambda: self.print_num(44))
btn44.pack(side=tk.LEFT)
quit_btn = tk.Button(frame, text="Quit", fg="red", command=frame.quit)
quit_btn.pack(side=tk.LEFT)
def print_num(self, num):
print("You pressed button {}".format(num))
if __name__ == '__main__':
root = tk.Tk()
app = App(root)
root.mainloop()
|
fydlzr/scrapy-redis
|
refs/heads/master
|
scrapy_redis/dupefilter.py
|
17
|
import time
from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
from . import connection
class RFPDupeFilter(BaseDupeFilter):
"""Redis-based request duplication filter"""
def __init__(self, server, key):
"""Initialize duplication filter
Parameters
----------
server : Redis instance
key : str
Where to store fingerprints
"""
self.server = server
self.key = key
@classmethod
def from_settings(cls, settings):
server = connection.from_settings(settings)
        # Create a one-time key, needed to support using this class as a
        # standalone dupefilter with Scrapy's default scheduler. If Scrapy
        # passed the spider to the open() method, this wouldn't be needed.
key = "dupefilter:%s" % int(time.time())
return cls(server, key)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings)
def request_seen(self, request):
fp = request_fingerprint(request)
added = self.server.sadd(self.key, fp)
return not added
def close(self, reason):
"""Delete data on close. Called by scrapy's scheduler"""
self.clear()
def clear(self):
"""Clears fingerprints data"""
self.server.delete(self.key)
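# Usage sketch (assumed, not part of the original module): Scrapy calls
# request_seen() for every scheduled request. The first sadd() of a
# fingerprint returns 1 (not seen yet), any repeat returns 0 (seen):
#
#   df = RFPDupeFilter(server, "dupefilter:example")
#   df.request_seen(request)  # False on first sighting
#   df.request_seen(request)  # True afterwards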
|
crazy-canux/django
|
refs/heads/master
|
tests/foreign_object/models/__init__.py
|
208
|
from .article import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, NewsArticle,
)
from .empty_join import SlugPage
from .person import Country, Friendship, Group, Membership, Person
__all__ = [
'Article', 'ArticleIdea', 'ArticleTag', 'ArticleTranslation', 'Country',
'Friendship', 'Group', 'Membership', 'NewsArticle', 'Person', 'SlugPage',
]
|
xbmc/atv2
|
refs/heads/atv2
|
xbmc/lib/libPython/Python/Lib/encodings/euc_jisx0213.py
|
12
|
#
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
# $CJKCodecs: euc_jisx0213.py,v 1.8 2004/06/28 18:16:03 perky Exp $
#
import _codecs_jp, codecs
codec = _codecs_jp.getcodec('euc_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class StreamReader(Codec, codecs.StreamReader):
def __init__(self, stream, errors='strict'):
codecs.StreamReader.__init__(self, stream, errors)
__codec = codec.StreamReader(stream, errors)
self.read = __codec.read
self.readline = __codec.readline
self.readlines = __codec.readlines
self.reset = __codec.reset
class StreamWriter(Codec, codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
codecs.StreamWriter.__init__(self, stream, errors)
__codec = codec.StreamWriter(stream, errors)
self.write = __codec.write
self.writelines = __codec.writelines
self.reset = __codec.reset
def getregentry():
return (codec.encode, codec.decode, StreamReader, StreamWriter)
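# Usage sketch (assumed): once this module is importable under the encodings
# package, the codec is reachable through the normal codecs machinery, e.g.:
#
#   u'\u3042'.encode('euc_jisx0213')  # hiragana A, in the EUC-JP compatible range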
|
collex100/odoo
|
refs/heads/8.0
|
addons/hr_contract/__init__.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_contract
import base_action_rule
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
SerpentCS/odoo
|
refs/heads/8.0
|
addons/account_followup/__openerp__.py
|
261
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payment Follow-up Management',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
Module to automate letters for unpaid invoices, with multi-level recalls.
=========================================================================
You can define multiple levels of recall through the menu:
---------------------------------------------------------------
Configuration / Follow-up / Follow-up Levels
Once defined, you can automatically print recalls every day by simply clicking on the menu:
------------------------------------------------------------------------------------------------------
Payment Follow-Up / Send Email and letters
It will generate a PDF / send emails / set manual actions according to the different levels
of recall defined. You can define different policies for different companies.
Note that if you want to check the follow-up level for a given partner/account entry, you can do so from the menu:
------------------------------------------------------------------------------------------------------------------
Reporting / Accounting / Follow-ups Analysis
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['account_accountant', 'mail'],
'data': [
'security/account_followup_security.xml',
'security/ir.model.access.csv',
'report/account_followup_report.xml',
'account_followup_data.xml',
'account_followup_view.xml',
'account_followup_customers.xml',
'wizard/account_followup_print_view.xml',
'res_config_view.xml',
'views/report_followup.xml',
'account_followup_reports.xml'
],
'demo': ['account_followup_demo.xml'],
'test': [
'test/account_followup.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Hao-Liu/tp-libvirt
|
refs/heads/master
|
libguestfs/tests/guestfs_file_operations.py
|
8
|
import re
import os
import logging
import tarfile
from autotest.client.shared import utils, error
from virttest import data_dir, utils_test
def test_tar_in(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Write a tempfile on host
3) Copy file to guest with tar-in
4) Delete created file
5) Check file on guest
"""
content = "This is file for test of tar-in."
path = params.get("gf_temp_file", "/tmp/test_tar_in")
path_on_host = os.path.join(data_dir.get_tmp_dir(), "test_tar_in.tar")
# Create a file on host
try:
open(path, 'w').write(content)
except IOError, detail:
raise error.TestNAError("Prepare file on host failed:%s" % detail)
try:
tar = tarfile.open(path_on_host, "w")
tar.add(path)
tar.close()
except tarfile.TarError, detail:
raise error.TestNAError("Prepare tar file on host failed:%s" % detail)
params['libvirt_domain'] = vm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# Copy file to guest
tar_in_result = gf.tar_in(path_on_host, "/")
logging.debug(tar_in_result)
# Delete file on host
try:
os.remove(path)
os.remove(path_on_host)
except OSError, detail:
        # Let it go because the file may not exist
logging.warning(detail)
if tar_in_result.exit_status:
gf.close_session()
raise error.TestFail("Tar in failed.")
logging.info("Tar in successfully.")
# Cat file on guest
cat_result = gf.cat(path)
rm_result = gf.rm(path)
gf.close_session()
logging.debug(cat_result)
logging.debug(rm_result)
if cat_result.exit_status:
raise error.TestFail("Cat file failed.")
else:
if not re.search(content, cat_result.stdout):
raise error.TestFail("Catted file do not match")
if rm_result.exit_status:
raise error.TestFail("Rm file failed.")
logging.info("Rm %s successfully.", path)
def test_tar_out(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Write a tempfile to guest
3) Copy file to host with tar-out
4) Delete created file
5) Check file on host
"""
content = "This is file for test of tar-out."
path = params.get("gf_temp_file", "/tmp/test_tar_out")
file_dir = os.path.dirname(path)
path_on_host = os.path.join(data_dir.get_tmp_dir(), "test_tar_out.tar")
params['libvirt_domain'] = vm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# Create file
if gf.write_file(path, content) is False:
gf.close_session()
raise error.TestFail("Create file failed.")
logging.info("Create file successfully.")
# Copy file to host
tar_out_result = gf.tar_out(file_dir, path_on_host)
logging.debug(tar_out_result)
if tar_out_result.exit_status:
gf.close_session()
raise error.TestFail("Tar out failed.")
logging.info("Tar out successfully.")
# Delete temp file
rm_result = gf.rm(path)
logging.debug(rm_result)
gf.close_session()
if rm_result.exit_status:
raise error.TestFail("Rm %s failed." % path)
logging.info("Rm %s successfully.", path)
# Uncompress file and check file in it.
uc_result = utils.run("cd %s && tar xf %s" % (file_dir, path_on_host))
logging.debug(uc_result)
try:
os.remove(path_on_host)
except IOError, detail:
raise error.TestFail(str(detail))
if uc_result.exit_status:
raise error.TestFail("Uncompress file on host failed.")
logging.info("Uncompress file on host successfully.")
# Check file
cat_result = utils.run("cat %s" % path, ignore_status=True)
logging.debug(cat_result)
try:
os.remove(path)
except IOError, detail:
logging.error(detail)
if cat_result.exit_status:
raise error.TestFail("Cat file failed.")
else:
if not re.search(content, cat_result.stdout):
raise error.TestFail("Catted file do not match.")
def test_copy_in(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Write a tempfile on host
3) Copy file to guest with copy-in
4) Delete created file
5) Check file on guest
"""
content = "This is file for test of copy-in."
path = params.get("gf_temp_file", "/tmp/test_copy_in")
path_dir = os.path.dirname(path)
# Create a file on host
try:
open(path, 'w').write(content)
except IOError, detail:
raise error.TestNAError("Prepare file on host failed:%s" % detail)
params['libvirt_domain'] = vm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# Copy file to guest
copy_in_result = gf.copy_in(path, path_dir)
logging.debug(copy_in_result)
# Delete file on host
try:
os.remove(path)
except IOError, detail:
logging.error(detail)
if copy_in_result.exit_status:
gf.close_session()
raise error.TestFail("Copy in failed.")
logging.info("Copy in successfully.")
# Cat file on guest
cat_result = gf.cat(path)
rm_result = gf.rm(path)
gf.close_session()
logging.debug(cat_result)
logging.debug(rm_result)
if cat_result.exit_status:
raise error.TestFail("Cat file failed.")
else:
if not re.search(content, cat_result.stdout):
raise error.TestFail("Catted file do not match")
if rm_result.exit_status:
raise error.TestFail("Rm file failed.")
logging.info("Rm %s successfully.", path)
def test_copy_out(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Write a tempfile to guest
3) Copy file to host with copy-out
4) Delete created file
5) Check file on host
"""
content = "This is file for test of copy-out."
path = params.get("gf_temp_file", "/tmp/test_copy_out")
path_dir = os.path.dirname(path)
params['libvirt_domain'] = vm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# Create file
if gf.write_file(path, content) is False:
gf.close_session()
raise error.TestFail("Create file failed.")
logging.info("Create file successfully.")
# Copy file to host
copy_out_result = gf.copy_out(path, path_dir)
logging.debug(copy_out_result)
if copy_out_result.exit_status:
gf.close_session()
raise error.TestFail("Copy out failed.")
logging.info("Copy out successfully.")
# Delete temp file
rm_result = gf.rm(path)
logging.debug(rm_result)
gf.close_session()
if rm_result.exit_status:
raise error.TestFail("Rm %s failed." % path)
logging.info("Rm %s successfully.", path)
# Check file
cat_result = utils.run("cat %s" % path, ignore_status=True)
logging.debug(cat_result.stdout)
try:
os.remove(path)
except IOError, detail:
logging.error(detail)
if cat_result.exit_status:
raise error.TestFail("Cat file failed.")
else:
if not re.search(content, cat_result.stdout):
raise error.TestFail("Catted file do not match.")
def run(test, params, env):
"""
Test guestfs with file commands: tar-in, tar-out, copy-in, copy-out
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
if vm.is_alive():
vm.destroy()
operation = params.get("gf_file_operation")
testcase = globals()["test_%s" % operation]
testcase(vm, params)
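# Dispatch sketch: with gf_file_operation set to e.g. "tar_in", the globals()
# lookup above resolves to test_tar_in and runs it against the shut-down VM;
# an unknown operation name raises KeyError.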
|
doduytrung/odoo-8.0
|
refs/heads/master
|
addons/hw_escpos/escpos/supported_devices.py
|
227
|
#!/usr/bin/python
# This is a list of ESC/POS compatible USB printers. The vendor and product ids can be found
# by typing lsusb in a Linux terminal; this will give you the ids in the form ID VENDOR:PRODUCT
device_list = [
{ 'vendor' : 0x04b8, 'product' : 0x0e03, 'name' : 'Epson TM-T20' },
{ 'vendor' : 0x04b8, 'product' : 0x0202, 'name' : 'Epson TM-T70' },
{ 'vendor' : 0x04b8, 'product' : 0x0e15, 'name' : 'Epson TM-T20II' },
]
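# Matching sketch (assumed; the callers of this list use pyusb):
#
#   import usb.core
#   for device in device_list:
#       dev = usb.core.find(idVendor=device['vendor'],
#                           idProduct=device['product'])
#       if dev is not None:
#           print "found %s" % device['name']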
|
Anonymouslemming/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/rds.py
|
24
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing
instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely
    on boto.rds2 (boto >= 2.26.0).
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
- mariadb was added in version 2.2
required: false
default: null
choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore.
If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only
when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or
command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
      - Prior to 2.0 it always defaulted to null and the API would use 3306; it had to be set to other DB default values when not using MySQL.
        Starting at 2.0 it automatically defaults to what is expected for each C(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- >
Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is
assigned. Used only when command=create or command=modify.
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- >
Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or
command=modify.
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with
no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for
the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next
preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug:
msg: "The new db endpoint is {{ rds.instance.endpoint }}"
'''
import sys
import time
from ansible.module_utils.ec2 import AWSRetry
try:
import boto.rds
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto.rds2
has_rds2 = True
except ImportError:
has_rds2 = False
DEFAULT_PORTS = {
'aurora': 3306,
'mariadb': 3306,
'mysql': 3306,
'oracle': 1521,
'sqlserver': 1433,
'postgres': 5432,
}
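# main() consults this map when command=create and no port is given: the part
# of db_engine before the first '-' is lowercased and looked up here, e.g.
# 'oracle-se1' -> 'oracle' -> 1521, 'MySQL' -> 'mysql' -> 3306.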
class RDSException(Exception):
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
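# Normalization sketch: RDSException flattens the two boto error shapes
# (attribute-style errors from boto.rds, dict bodies from boto.rds2) into a
# uniform .message / .code pair; delete_db_instance_or_snapshot relies on this
# when it treats e.code == 'DBInstanceNotFound' as a successful delete.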
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError as e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError as e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(
db_instance_identifier=instancename
)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound as e:
return None
except Exception as e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(
db_snapshot_identifier=snapshotid,
snapshot_type='manual'
)['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound as e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password,
**params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(
instance_name,
source_instance,
**params
)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(
instance_name,
snapshot,
**params
)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance.create_time,
'status': self.status,
'availability_zone': self.instance.availability_zone,
'backup_retention': self.instance.backup_retention_period,
'backup_window': self.instance.preferred_backup_window,
'maintenance_window': self.instance.preferred_maintenance_window,
'multi_zone': self.instance.multi_az,
'instance_type': self.instance.instance_class,
'username': self.instance.master_username,
'iops': self.instance.iops
}
# Only assign an Endpoint if one is available
if hasattr(self.instance, 'endpoint'):
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception as e:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
d['endpoint'] = self.instance["Endpoint"].get('Address', None)
d['port'] = self.instance["Endpoint"].get('Port', None)
else:
d['endpoint'] = None
d['port'] = None
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot.snapshot_create_time,
'status': self.status,
'availability_zone': self.snapshot.availability_zone,
'instance_id': self.snapshot.instance_id,
'instance_created': self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot['SnapshotCreateTime'],
'status': self.status,
'availability_zone': self.snapshot['AvailabilityZone'],
'instance_id': self.snapshot['DBInstanceIdentifier'],
'instance_created': self.snapshot['InstanceCreateTime'],
'snapshot_type': self.snapshot['SnapshotType'],
'iops': self.snapshot['Iops'],
}
return d
def await_resource(conn, resource, status, module):
start_time = time.time()
wait_timeout = module.params.get('wait_timeout') + start_time
check_interval = 5
while wait_timeout > time.time() and resource.status != status:
time.sleep(check_interval)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name)
if resource is None:
break
# Some RDS resources take much longer than others to be ready. Check
# less aggressively for slow ones to avoid throttling.
if time.time() > start_time + 90:
check_interval = 20
return resource
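# Polling sketch: await_resource re-reads the resource every 5 seconds (every
# 20 seconds after the first 90, to avoid throttling slow resource types)
# until it reports the requested status or wait_timeout expires, e.g.:
#
#   resource = await_resource(conn, result, 'available', module)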
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group', 'port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if has_rds2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException as e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception as e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException as e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if result.get_data().get('replication_source'):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
else:
changed = False
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if has_rds2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if has_rds2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'size': 'allocated_storage',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if has_rds2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
# Check for options only supported with rds2
for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
if module.params.get(k):
module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) is not None and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
if module.params.get(k) is False:
pass
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if has_rds2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
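# Mapping sketch: with has_rds2 True, module options are renamed to the rds2
# keyword arguments, e.g. size=10 and password='x' become
# {'allocated_storage': 10, 'master_user_password': 'x'}, and a tags dict is
# flattened into the list of (key, value) tuples that rds2 expects.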
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name=dict(required=False),
source_instance=dict(required=False),
db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
'sqlserver-web', 'postgres', 'aurora'], required=False),
size=dict(required=False),
instance_type=dict(aliases=['type'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
db_name=dict(required=False),
engine_version=dict(required=False),
parameter_group=dict(required=False),
license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone=dict(type='bool', required=False),
iops=dict(required=False),
security_groups=dict(required=False),
vpc_security_groups=dict(type='list', required=False),
port=dict(required=False, type='int'),
upgrade=dict(type='bool', default=False),
option_group=dict(required=False),
maint_window=dict(required=False),
backup_window=dict(required=False),
backup_retention=dict(required=False),
zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet=dict(required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
snapshot=dict(required=False),
apply_immediately=dict(type='bool', default=False),
new_instance_name=dict(required=False),
tags=dict(type='dict', required=False),
publicly_accessible=dict(required=False),
character_set_name=dict(required=False),
force_failover=dict(type='bool', required=False, default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")
# set port to per db defaults if not specified
if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
if '-' in module.params['db_engine']:
engine = module.params['db_engine'].split('-')[0]
else:
engine = module.params['db_engine']
module.params['port'] = DEFAULT_PORTS[engine.lower()]
# connect to the rds endpoint
if has_rds2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
baberthal/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/media/_base/library/__init__.py
|
81
|
from .main import Library
def autoload():
return Library()
config = []
|
nialaa/pythonessie
|
refs/heads/master
|
birthscript.py
|
1
|
#!/usr/bin/python2.7
from time import sleep
import sys
class ProgressBar():
DEFAULT_BAR_LENGTH = float(65)
def __init__(self, end, start=0):
self.end = end
self.start = start
self._barLength = ProgressBar.DEFAULT_BAR_LENGTH
self.setLevel(self.start)
self._plotted = False
def setLevel(self, level, initial=False):
self._level = level
if level < self.start: self._level = self.start
if level > self.end: self._level = self.end
self._ratio = float(self._level - self.start) / float(self.end - self.start)
self._levelChars = int(self._ratio * self._barLength)
def plotProgress(self):
sys.stdout.write("\r %3i%% [%s%s]" %(
int(self._ratio * 100.0),
'=' * int(self._levelChars),
' ' * int(self._barLength - self._levelChars),
))
sys.stdout.flush()
self._plotted = True
def setAndPlot(self, level):
oldChars = self._levelChars
self.setLevel(level)
if (not self._plotted) or (oldChars != self._levelChars):
self.plotProgress()
def __del__(self):
sys.stdout.write("\n")
def congrats():
print """ YYYYY
HHHHH HHHHH YYY YYYY
HHHHH HHHHH PP PPPPPP YYY YYY
HHH HHH PPP PP YYY YYY
HHH HHH AAAAA PP PPPPPP PP PP YYY YYY
HHH HHH AAAAAAA PPP PP PP PP YYY YYY
HHH HHH AA AA PP PP PP PP YYY YYY
HHH HHH AA AA PP PP PPPPPPP YYYYY
HHHHHHHHHHHHHHH AA AA PP PP PP YYYY
HHHHHHHHHHHHHHH AA AA PPPPPPP PP YYYY -------
HHHHHHHHHHHHHHH AAAAAAAAA PP PP YYYY -------
HHH HHH AAAAAAAAA PP PP YYYY -------
HHH HHH AA AA PP PP YYYY
HHH HHH AA AA PP PP YYYY
HHH HHH AA AA PP PP YYYYYY
HHH HHH AA AA PP PP YYYYYYY
HHH HHH PP PP YYYYYYY
HHHHH HHHHH PP PP YYYYYYY
HHHHH HHHHH PP PP YYYYYYY
YYYYY
BBBBBBBBBBBBB YYY
BBBBBBBBBBBBBB Y
BBBB BBB II YYY YYY
BB BB II DDDDDDDDDDDDD YYYY YYYY
BB BB DDDDDDDDDDDDDD YY YY
BB BB III RRR RRRR DDD DDD A YY YY
BB BB II RRRR RR DDD DDD AAA YY YY
BB BBB II RRR DDD DDD AAAAA YY YY
BBB BBBB II RR DDD DDD AAAAAAA YY YY
BBBBBBBBBBB II RR DDD DDD AA AA YY YY
BBBBBBBBB II RR DDD DDD AA AA YYYY
BBBBBBBBBBB II RR DDD DDD AAAAAAAAA YYY
BBB BBBB IIII RRRR DDD DDD AAAAAAAAA YYY
BB BBB HHH DDD DDD AA AA YYY
BB BBB TT HHHH DDD DDD AA AA YYY
BB BBB TT HH DDD DDD AA AA YYY
BB BBB TTTTTT HH DDD DDD AA AA YYY
BB BBB TT HH DDD DDD YYY
BB BBB TT HHHHHHHH DDDDDDDDDDDDD YYY
BB BBB TT HH HH DDDDDDDDDDDD YYYY
BBBB BBBB TT HH HH YYYY
BBBBBBBBBBBBBBB TT TT HH HH YYYYYYYYYYYYYYYYYYYYYYYYYY
BBBBBBBBBBBBBB TTTT HH HH YYYYYYYYYYYYYYYYYYYYYYYY
HHHH HHHH YYYYYYYYYYYYYYYYYYYYYY
"""
if __name__ == "__main__":
import time
count = 5
    question = raw_input("Is it your birthday? (Y/N) ")
    if question.lower().startswith('y'):
print 'OK. Loading ',
pb = ProgressBar(count)
curProgress = 0
while curProgress <= count:
pb.setAndPlot(curProgress)
curProgress += 1
time.sleep(.5)
del pb
print 'Loaded!',
sleep(.5)
congrats()
else:
        print 'Oh, sorry, then happy no-birthday :D'
sleep(.5)
print """
oooo$$$$$$$$$$$$oooo
oo$$$$$$$$$$$$$$$$$$$$$$$$o
oo$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o o$ $$ o$
o $ oo o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o $$ $$ $$o$
oo $ $ "$ o$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$o $$$o$$o$
"$$$$$$o$ o$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$o $$$$$$$$
$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$
$$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$$$$$$ '''$$$
"$$$'''$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ '$$$
$$$ o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ '$$$o
o$$' $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$o
$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' '$$$$$$ooooo$$$$o
o$$$oooo$$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ o$$$$$$$$$$$$$$$$$
$$$$$$$$'$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$""""""""
'''' $$$$ '$$$$$$$$$$$$$$$$$$$$$$$$$$$$' o$$$
'$$$o ""'$$$$$$$$$$$$$$$$$$'$$' $$$
$$$o '$$""$$$$$$''''' o$$$
$$$$o o$$$"
"$$$$o o$$$$$$o'$$$$o o$$$$
'$$$$$oo ""$$$$o$$$$$o o$$$$""
""$$$$$oooo '$$$o$$$$$$$$$'''
"'$$$$$$$oo $$$$$$$$$$
''''$$$$$$$$$$$
$$$$$$$$$$$$
$$$$$$$$$$"
"$$$'''"""
|
mcueto/djangorestframework-apicontrol
|
refs/heads/master
|
rest_framework_apicontrol/migrations/0008_loggingevent_logging_group.py
|
1
|
# Generated by Django 2.1.2 on 2018-11-20 03:18
from django.db import migrations, models
import django.db.models.deletion
import rest_framework_apicontrol.utils
class Migration(migrations.Migration):
dependencies = [
('rest_framework_apicontrol', '0007_create_default_logging_group'),
]
operations = [
migrations.AddField(
model_name='loggingevent',
name='logging_group',
field=models.ForeignKey(default=rest_framework_apicontrol.utils.get_default_logging_group, on_delete=django.db.models.deletion.CASCADE, related_name='events', to='rest_framework_apicontrol.LoggingGroup'),
),
]
|
porksmash/swarfarm
|
refs/heads/master
|
herders/autocomplete.py
|
1
|
from django.db.models import Q
from django.template import loader
from dal import autocomplete
from .models import MonsterTag, MonsterInstance
class MonsterInstanceAutocomplete(autocomplete.Select2QuerySetView):
paginate_by = 15
def get_queryset(self):
qs = MonsterInstance.objects.filter(owner__user=self.request.user)
if self.q:
# Split the terms into words and build a Q object
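            # (each term must match at least one field -- an AND of ORs; e.g. a
            #  hypothetical query 'fire fox' requires both words to hit somewhere)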
search_terms = self.q.split(' ')
query = Q()
for term in search_terms:
query.add(
Q(monster__name__icontains=term) |
Q(monster__awakens_from__name__icontains=term) |
Q(monster__awakens_to__name__icontains=term) |
Q(monster__element__startswith=term),
Q.AND
)
qs = qs.filter(query)
return qs
def get_result_label(self, item):
return loader.get_template('autocomplete/monster_instance_choice.html').render({'choice': item})
class MonsterTagAutocomplete(autocomplete.Select2QuerySetView):
paginate_by = 15
def get_queryset(self):
qs = MonsterTag.objects.all()
if self.q:
# Filter the queryset
qs = qs.filter(name__icontains=self.q)
return qs
|
timmahrt/ProMo
|
refs/heads/master
|
examples/pitch_morph_to_pitch_contour.py
|
1
|
'''
Created on Jun 29, 2016
This file shows an example of morphing to a pitch tier.
In f0_morph.py, the target pitch contour is extracted in the
script from another file. In this example, the pitch tier
could come from any source (hand sculpted or generated).
WARNING: If you attempt to morph to a pitch track that has
too few sample points, the morph process will fail.
@author: Tim
'''
import os
from os.path import join
from praatio import pitch_and_intensity
from praatio import dataio
from promo import f0_morph
from promo.morph_utils import utils
from promo.morph_utils import interpolation
# Define the arguments for the code
root = os.path.abspath(join('.', 'files'))
praatEXE = r"C:\Praat.exe" # Windows paths
praatEXE = "/Applications/Praat.app/Contents/MacOS/Praat" # Mac paths
minPitch = 50
maxPitch = 350
stepList = utils.generateStepList(3)
fromName = "mary1"
fromWavFN = fromName + ".wav"
fromPitchFN = fromName + ".txt"
fromTGFN = join(root, fromName + ".TextGrid")
toName = "mary1_stylized"
toPitchFN = toName + ".PitchTier"
# Prepare the data for morphing
# 1st load it into memory
fromPitchList = pitch_and_intensity.extractPI(join(root, fromWavFN),
join(root, fromPitchFN),
praatEXE, minPitch,
maxPitch, forceRegenerate=False)
fromPitchList = [(time, pitch) for time, pitch, _ in fromPitchList]
# 2nd load in the target pitch contour
pitchTier = dataio.open2DPointObject(join(root, toPitchFN))
toPitchList = [(time, pitch) for time, pitch in pitchTier.pointList]
# The target contour doesn't contain enough sample points, so interpolate
# over the provided samples
# (this step can be skipped if there are enough sample points--a warning
# will be issued if there are any potential problems)
toPitchList = interpolation.quadraticInterpolation(toPitchList, 4, 1000, 0)
# 3rd select which sections to align.
# We'll use textgrids for this purpose.
tierName = "words"
fromPitch = f0_morph.getPitchForIntervals(fromPitchList, fromTGFN, tierName)
toPitch = f0_morph.getPitchForIntervals(toPitchList, fromTGFN, tierName)
# Run the morph process
f0_morph.f0Morph(fromWavFN=join(root, fromWavFN),
pitchPath=root,
stepList=stepList,
outputName="%s_%s_f0_morph" % (fromName, toName),
doPlotPitchSteps=True,
fromPitchData=fromPitch,
toPitchData=toPitch,
outputMinPitch=minPitch,
outputMaxPitch=maxPitch,
praatEXE=praatEXE)
|
kurtdawg24/robotframework
|
refs/heads/master
|
src/robot/utils/robottime.py
|
18
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import time
import re
from .normalizing import normalize
from .misc import plural_or_not
from .robottypes import is_number, is_string
_timer_re = re.compile(r'([+-])?(\d+:)?(\d+):(\d+)(\.\d+)?')
def _get_timetuple(epoch_secs=None):
if epoch_secs is None: # can also be 0 (at least in unit tests)
epoch_secs = time.time()
secs, millis = _float_secs_to_secs_and_millis(epoch_secs)
timetuple = time.localtime(secs)[:6] # from year to secs
return timetuple + (millis,)
def _float_secs_to_secs_and_millis(secs):
isecs = int(secs)
millis = int(round((secs - isecs) * 1000))
return (isecs, millis) if millis < 1000 else (isecs+1, 0)
def timestr_to_secs(timestr, round_to=3):
"""Parses time like '1h 10s', '01:00:10' or '42' and returns seconds."""
if is_string(timestr) or is_number(timestr):
for converter in _number_to_secs, _timer_to_secs, _time_string_to_secs:
secs = converter(timestr)
if secs is not None:
return secs if round_to is None else round(secs, round_to)
raise ValueError("Invalid time string '%s'." % timestr)
def _number_to_secs(number):
try:
return float(number)
except ValueError:
return None
def _timer_to_secs(number):
match = _timer_re.match(number)
if not match:
return None
prefix, hours, minutes, seconds, millis = match.groups()
seconds = float(minutes) * 60 + float(seconds)
if hours:
seconds += float(hours[:-1]) * 60 * 60
if millis:
seconds += float(millis[1:]) / 10**len(millis[1:])
if prefix == '-':
seconds *= -1
return seconds
def _time_string_to_secs(timestr):
timestr = _normalize_timestr(timestr)
if not timestr:
return None
millis = secs = mins = hours = days = 0
if timestr[0] == '-':
sign = -1
timestr = timestr[1:]
else:
sign = 1
temp = []
for c in timestr:
try:
if c == 'x': millis = float(''.join(temp)); temp = []
elif c == 's': secs = float(''.join(temp)); temp = []
elif c == 'm': mins = float(''.join(temp)); temp = []
elif c == 'h': hours = float(''.join(temp)); temp = []
elif c == 'd': days = float(''.join(temp)); temp = []
else: temp.append(c)
except ValueError:
return None
if temp:
return None
return sign * (millis/1000 + secs + mins*60 + hours*60*60 + days*60*60*24)
def _normalize_timestr(timestr):
timestr = normalize(timestr)
for specifier, aliases in [('x', ['millisecond', 'millisec', 'millis',
'msec', 'ms']),
('s', ['second', 'sec']),
('m', ['minute', 'min']),
('h', ['hour']),
('d', ['day'])]:
plural_aliases = [a+'s' for a in aliases if not a.endswith('s')]
for alias in plural_aliases + aliases:
if alias in timestr:
timestr = timestr.replace(alias, specifier)
return timestr
def secs_to_timestr(secs, compact=False):
"""Converts time in seconds to a string representation.
Returned string is in format like
'1 day 2 hours 3 minutes 4 seconds 5 milliseconds' with following rules:
- Time parts having zero value are not included (e.g. '3 minutes 4 seconds'
instead of '0 days 0 hours 3 minutes 4 seconds')
    - Hour part has a maximum of 23 and minutes and seconds both have 59
(e.g. '1 minute 40 seconds' instead of '100 seconds')
If compact has value 'True', short suffixes are used.
(e.g. 1d 2h 3min 4s 5ms)
"""
return _SecsToTimestrHelper(secs, compact).get_value()
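# Illustrative expectations (hedged; computed by hand from the rules above):
#   secs_to_timestr(3661.5)               -> '1 hour 1 minute 1 second 500 milliseconds'
#   secs_to_timestr(3661.5, compact=True) -> '1h 1min 1s 500ms'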
class _SecsToTimestrHelper:
def __init__(self, float_secs, compact):
self._compact = compact
self._ret = []
self._sign, millis, secs, mins, hours, days \
= self._secs_to_components(float_secs)
self._add_item(days, 'd', 'day')
self._add_item(hours, 'h', 'hour')
self._add_item(mins, 'min', 'minute')
self._add_item(secs, 's', 'second')
self._add_item(millis, 'ms', 'millisecond')
def get_value(self):
if len(self._ret) > 0:
return self._sign + ' '.join(self._ret)
return '0s' if self._compact else '0 seconds'
def _add_item(self, value, compact_suffix, long_suffix):
if value == 0:
return
if self._compact:
suffix = compact_suffix
else:
suffix = ' %s%s' % (long_suffix, plural_or_not(value))
self._ret.append('%d%s' % (value, suffix))
def _secs_to_components(self, float_secs):
if float_secs < 0:
sign = '- '
float_secs = abs(float_secs)
else:
sign = ''
int_secs, millis = _float_secs_to_secs_and_millis(float_secs)
secs = int_secs % 60
mins = int(int_secs / 60) % 60
hours = int(int_secs / (60*60)) % 24
days = int(int_secs / (60*60*24))
return sign, millis, secs, mins, hours, days
def format_time(timetuple_or_epochsecs, daysep='', daytimesep=' ', timesep=':',
millissep=None, gmtsep=None):
"""Returns a timestamp formatted from given time using separators.
Time can be given either as a timetuple or seconds after epoch.
Timetuple is (year, month, day, hour, min, sec[, millis]), where parts must
be integers and millis is required only when millissep is not None.
Notice that this is not 100% compatible with standard Python timetuples
which do not have millis.
Seconds after epoch can be either an integer or a float.
"""
if is_number(timetuple_or_epochsecs):
timetuple = _get_timetuple(timetuple_or_epochsecs)
else:
timetuple = timetuple_or_epochsecs
daytimeparts = ['%02d' % t for t in timetuple[:6]]
day = daysep.join(daytimeparts[:3])
time_ = timesep.join(daytimeparts[3:6])
millis = millissep and '%s%03d' % (millissep, timetuple[6]) or ''
return day + daytimesep + time_ + millis + _diff_to_gmt(gmtsep)
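# Hedged example with assumed timetuple values:
#   format_time((2006, 2, 24, 15, 8, 31, 456), daysep='-', millissep='.')
#   -> '2006-02-24 15:08:31.456'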
def _diff_to_gmt(sep):
if not sep:
return ''
if time.altzone == 0:
sign = ''
elif time.altzone > 0:
sign = '-'
else:
sign = '+'
minutes = abs(time.altzone) / 60.0
hours, minutes = divmod(minutes, 60)
return '%sGMT%s%s%02d:%02d' % (sep, sep, sign, hours, minutes)
def get_time(format='timestamp', time_=None):
"""Return the given or current time in requested format.
    If time is not given, the current time is used. How the time is returned
    is determined based on the given 'format' string as follows. Note that
    all checks are case-insensitive.
- If 'format' contains word 'epoch' the time is returned in seconds after
the unix epoch.
- If 'format' contains any of the words 'year', 'month', 'day', 'hour',
'min' or 'sec' only selected parts are returned. The order of the returned
parts is always the one in previous sentence and order of words in
'format' is not significant. Parts are returned as zero padded strings
(e.g. May -> '05').
- Otherwise (and by default) the time is returned as a timestamp string in
format '2006-02-24 15:08:31'
"""
time_ = int(time_ or time.time())
format = format.lower()
    # 1) Return time in seconds since epoch
if 'epoch' in format:
return time_
timetuple = time.localtime(time_)
parts = []
for i, match in enumerate('year month day hour min sec'.split()):
if match in format:
parts.append('%.2d' % timetuple[i])
# 2) Return time as timestamp
if not parts:
return format_time(timetuple, daysep='-')
    # 3) Return requested parts of the time
elif len(parts) == 1:
return parts[0]
else:
return parts
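# Sketch of the three return shapes (epoch/date values are assumed examples):
#   get_time('epoch')          -> 1140793711            (int seconds)
#   get_time('year month day') -> ['2006', '02', '24']  (zero-padded parts)
#   get_time()                 -> '2006-02-24 15:08:31' (timestamp string)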
def parse_time(timestr):
"""Parses the time string and returns its value as seconds since epoch.
Time can be given in five different formats:
    1) Numbers are interpreted as time since epoch directly. Ints and floats
    work as well, not only strings containing numbers.
2) Valid timestamp ('YYYY-MM-DD hh:mm:ss' and 'YYYYMMDD hhmmss').
3) 'NOW' (case-insensitive) is the current local time.
4) 'UTC' (case-insensitive) is the current time in UTC.
5) Format 'NOW - 1 day' or 'UTC + 1 hour 30 min' is the current local/UTC
time plus/minus the time specified with the time string.
Seconds are rounded down to avoid getting times in the future.
"""
for method in [_parse_time_epoch,
_parse_time_timestamp,
_parse_time_now_and_utc]:
seconds = method(timestr)
if seconds is not None:
return int(seconds)
raise ValueError("Invalid time format '%s'" % timestr)
def _parse_time_epoch(timestr):
try:
ret = float(timestr)
except ValueError:
return None
if ret < 0:
raise ValueError("Epoch time must be positive (got %s)" % timestr)
return ret
def _parse_time_timestamp(timestr):
try:
return timestamp_to_secs(timestr, (' ', ':', '-', '.'))
except ValueError:
return None
def _parse_time_now_and_utc(timestr):
timestr = timestr.replace(' ', '').lower()
base = _parse_time_now_and_utc_base(timestr[:3])
if base is not None:
extra = _parse_time_now_and_utc_extra(timestr[3:])
if extra is not None:
return base + extra
return None
def _parse_time_now_and_utc_base(base):
now = time.time()
if base == 'now':
return now
if base == 'utc':
zone = time.altzone if time.localtime().tm_isdst else time.timezone
return now + zone
return None
def _parse_time_now_and_utc_extra(extra):
if not extra:
return 0
if extra[0] not in ['+', '-']:
return None
return (1 if extra[0] == '+' else -1) * timestr_to_secs(extra[1:])
def get_timestamp(daysep='', daytimesep=' ', timesep=':', millissep='.'):
return TIMESTAMP_CACHE.get_timestamp(daysep, daytimesep, timesep, millissep)
def timestamp_to_secs(timestamp, seps=None):
try:
secs = _timestamp_to_millis(timestamp, seps) / 1000.0
except (ValueError, OverflowError):
raise ValueError("Invalid timestamp '%s'" % timestamp)
else:
return round(secs, 3)
def secs_to_timestamp(secs, seps=None, millis=False):
if not seps:
seps = ('', ' ', ':', '.' if millis else None)
ttuple = time.localtime(secs)[:6]
if millis:
millis = (secs - int(secs)) * 1000
ttuple = ttuple + (int(round(millis)),)
return format_time(ttuple, *seps)
def get_elapsed_time(start_time, end_time):
"""Returns the time between given timestamps in milliseconds."""
if start_time == end_time or not (start_time and end_time):
return 0
if start_time[:-4] == end_time[:-4]:
return int(end_time[-3:]) - int(start_time[-3:])
start_millis = _timestamp_to_millis(start_time)
end_millis = _timestamp_to_millis(end_time)
# start/end_millis can be long but we want to return int when possible
return int(end_millis - start_millis)
def elapsed_time_to_string(elapsed, include_millis=True):
"""Converts elapsed time in milliseconds to format 'hh:mm:ss.mil'.
    If `include_millis` is False, the '.mil' part is omitted.
"""
prefix = ''
if elapsed < 0:
elapsed = abs(elapsed)
prefix = '-'
if include_millis:
return prefix + _elapsed_time_to_string(elapsed)
return prefix + _elapsed_time_to_string_without_millis(elapsed)
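# Illustrative values (computed by hand from the helpers below):
#   elapsed_time_to_string(3735003)        -> '01:02:15.003'
#   elapsed_time_to_string(3735003, False) -> '01:02:15'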
def _elapsed_time_to_string(elapsed):
secs, millis = divmod(int(round(elapsed)), 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return '%02d:%02d:%02d.%03d' % (hours, mins, secs, millis)
def _elapsed_time_to_string_without_millis(elapsed):
secs = int(round(elapsed, -3)) / 1000
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hours, mins, secs)
def _timestamp_to_millis(timestamp, seps=None):
if seps:
timestamp = _normalize_timestamp(timestamp, seps)
Y, M, D, h, m, s, millis = _split_timestamp(timestamp)
secs = time.mktime(datetime.datetime(Y, M, D, h, m, s).timetuple())
return int(round(1000*secs + millis))
def _normalize_timestamp(ts, seps):
for sep in seps:
if sep in ts:
ts = ts.replace(sep, '')
ts = ts.ljust(17, '0')
return '%s%s%s %s:%s:%s.%s' % (ts[:4], ts[4:6], ts[6:8], ts[8:10],
ts[10:12], ts[12:14], ts[14:17])
def _split_timestamp(timestamp):
years = int(timestamp[:4])
mons = int(timestamp[4:6])
days = int(timestamp[6:8])
hours = int(timestamp[9:11])
mins = int(timestamp[12:14])
secs = int(timestamp[15:17])
millis = int(timestamp[18:21])
return years, mons, days, hours, mins, secs, millis
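# Index layout assumed by _split_timestamp (normalized 'YYYYMMDD hh:mm:ss.mil'):
#   [0:4] year, [4:6] month, [6:8] day, [9:11] hour,
#   [12:14] min, [15:17] sec, [18:21] millis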
class TimestampCache(object):
def __init__(self):
self._previous_secs = None
self._previous_separators = None
self._previous_timestamp = None
def get_timestamp(self, daysep='', daytimesep=' ', timesep=':', millissep='.'):
epoch = self._get_epoch()
secs, millis = _float_secs_to_secs_and_millis(epoch)
if self._use_cache(secs, daysep, daytimesep, timesep):
return self._cached_timestamp(millis, millissep)
timestamp = format_time(epoch, daysep, daytimesep, timesep, millissep)
self._cache_timestamp(secs, timestamp, daysep, daytimesep, timesep, millissep)
return timestamp
# Seam for mocking
def _get_epoch(self):
return time.time()
def _use_cache(self, secs, *separators):
return self._previous_timestamp \
and self._previous_secs == secs \
and self._previous_separators == separators
def _cached_timestamp(self, millis, millissep):
if millissep:
return '%s%s%03d' % (self._previous_timestamp, millissep, millis)
return self._previous_timestamp
def _cache_timestamp(self, secs, timestamp, daysep, daytimesep, timesep, millissep):
self._previous_secs = secs
self._previous_separators = (daysep, daytimesep, timesep)
self._previous_timestamp = timestamp[:-4] if millissep else timestamp
TIMESTAMP_CACHE = TimestampCache()
|
Fakor/congov
|
refs/heads/master
|
cli/lib/constants.py
|
1
|
import os
cli_home = os.environ["CLI_PATH"]
gov_home = os.environ["GOV_HOME"]
command_templates_path = os.path.join(gov_home, "resource_files/tests/json_templates")
|
WoLpH/CouchPotatoServer
|
refs/heads/master
|
libs/unrar2/__init__.py
|
16
|
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
pyUnRAR2 is a ctypes based wrapper around the free UnRAR.dll.
It is a modified version of Jimmy Retzlaff's pyUnRAR - simpler,
more stable and foolproof.
Notice that it has an INCOMPATIBLE interface.
It enables reading and unpacking of archives created with the
RAR/WinRAR archivers. There is a low-level interface which is very
similar to the C interface provided by UnRAR. There is also a
higher level interface which makes some common operations easier.
"""
__version__ = '0.99.2'
try:
WindowsError
in_windows = True
except NameError:
in_windows = False
if in_windows:
from windows import RarFileImplementation
else:
from unix import RarFileImplementation
import fnmatch, time, weakref
class RarInfo(object):
"""Represents a file header in an archive. Don't instantiate directly.
Use only to obtain information about file.
YOU CANNOT EXTRACT FILE CONTENTS USING THIS OBJECT.
USE METHODS OF RarFile CLASS INSTEAD.
Properties:
index - index of file within the archive
filename - name of the file in the archive including path (if any)
datetime - file date/time as a struct_time suitable for time.strftime
isdir - True if the file is a directory
size - size in bytes of the uncompressed file
comment - comment associated with the file
Note - this is not currently intended to be a Python file-like object.
"""
def __init__(self, rarfile, data):
self.rarfile = weakref.proxy(rarfile)
self.index = data['index']
self.filename = data['filename']
self.isdir = data['isdir']
self.size = data['size']
self.datetime = data['datetime']
self.comment = data['comment']
def __str__(self):
        try:
arcName = self.rarfile.archiveName
except ReferenceError:
arcName = "[ARCHIVE_NO_LONGER_LOADED]"
return '<RarInfo "%s" in "%s">' % (self.filename, arcName)
class RarFile(RarFileImplementation):
def __init__(self, archiveName, password=None):
"""Instantiate the archive.
archiveName is the name of the RAR file.
password is used to decrypt the files in the archive.
Properties:
comment - comment associated with the archive
>>> print RarFile('test.rar').comment
This is a test.
"""
self.archiveName = archiveName
RarFileImplementation.init(self, password)
def __del__(self):
self.destruct()
def infoiter(self):
"""Iterate over all the files in the archive, generating RarInfos.
>>> import os
>>> for fileInArchive in RarFile('test.rar').infoiter():
... print os.path.split(fileInArchive.filename)[-1],
... print fileInArchive.isdir,
... print fileInArchive.size,
... print fileInArchive.comment,
... print tuple(fileInArchive.datetime)[0:5],
... print time.strftime('%a, %d %b %Y %H:%M', fileInArchive.datetime)
test True 0 None (2003, 6, 30, 1, 59) Mon, 30 Jun 2003 01:59
test.txt False 20 None (2003, 6, 30, 2, 1) Mon, 30 Jun 2003 02:01
this.py False 1030 None (2002, 2, 8, 16, 47) Fri, 08 Feb 2002 16:47
"""
for params in RarFileImplementation.infoiter(self):
yield RarInfo(self, params)
def infolist(self):
"""Return a list of RarInfos, descripting the contents of the archive."""
return list(self.infoiter())
def read_files(self, condition='*'):
"""Read specific files from archive into memory.
If "condition" is a list of numbers, then return files which have those positions in infolist.
If "condition" is a string, then it is treated as a wildcard for names of files to extract.
If "condition" is a function, it is treated as a callback function, which accepts a RarInfo object
and returns boolean True (extract) or False (skip).
If "condition" is omitted, all files are returned.
Returns list of tuples (RarInfo info, str contents)
"""
checker = condition2checker(condition)
return RarFileImplementation.read_files(self, checker)
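    # Hedged usage sketch ('archive' and the patterns are hypothetical):
    #   archive.read_files('*.txt')                      # wildcard on names
    #   archive.read_files([0, 2])                       # indices in infolist()
    #   archive.read_files(lambda info: not info.isdir)  # per-RarInfo callback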
def extract(self, condition='*', path='.', withSubpath=True, overwrite=True):
"""Extract specific files from archive to disk.
If "condition" is a list of numbers, then extract files which have those positions in infolist.
If "condition" is a string, then it is treated as a wildcard for names of files to extract.
If "condition" is a function, it is treated as a callback function, which accepts a RarInfo object
and returns either boolean True (extract) or boolean False (skip).
DEPRECATED: If "condition" callback returns string (only supported for Windows) -
that string will be used as a new name to save the file under.
If "condition" is omitted, all files are extracted.
"path" is a directory to extract to
"withSubpath" flag denotes whether files are extracted with their full path in the archive.
"overwrite" flag denotes whether extracted files will overwrite old ones. Defaults to true.
Returns list of RarInfos for extracted files."""
checker = condition2checker(condition)
return RarFileImplementation.extract(self, checker, path, withSubpath, overwrite)
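# Hedged example; 'test.rar' and 'out' are placeholder names:
#   RarFile('test.rar').extract('*.txt', path='out', withSubpath=False)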
def condition2checker(condition):
"""Converts different condition types to callback"""
if type(condition) in [str, unicode]:
def smatcher(info):
return fnmatch.fnmatch(info.filename, condition)
return smatcher
elif type(condition) in [list, tuple] and type(condition[0]) in [int, long]:
def imatcher(info):
return info.index in condition
return imatcher
elif callable(condition):
return condition
else:
raise TypeError
|
dustyleary/googletest
|
refs/heads/master
|
test/gtest_filter_unittest.py
|
2826
|
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
|
redhat-openstack/nova
|
refs/heads/f22-patches
|
nova/api/openstack/compute/contrib/shelve.py
|
13
|
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The shelved mode extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
auth_shelve = exts.extension_authorizer('compute', 'shelve')
auth_shelve_offload = exts.extension_authorizer('compute', 'shelveOffload')
auth_unshelve = exts.extension_authorizer('compute', 'unshelve')
class ShelveController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ShelveController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _get_instance(self, context, instance_id):
try:
return self.compute_api.get(context, instance_id,
want_objects=True)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.action('shelve')
def _shelve(self, req, id, body):
"""Move an instance into shelved mode."""
context = req.environ["nova.context"]
auth_shelve(context)
instance = self._get_instance(context, id)
try:
self.compute_api.shelve(context, instance)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'shelve')
return webob.Response(status_int=202)
@wsgi.action('shelveOffload')
def _shelve_offload(self, req, id, body):
"""Force removal of a shelved instance from the compute node."""
context = req.environ["nova.context"]
auth_shelve_offload(context)
instance = self._get_instance(context, id)
try:
self.compute_api.shelve_offload(context, instance)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'shelveOffload')
return webob.Response(status_int=202)
@wsgi.action('unshelve')
def _unshelve(self, req, id, body):
"""Restore an instance from shelved mode."""
context = req.environ["nova.context"]
auth_unshelve(context)
instance = self._get_instance(context, id)
try:
self.compute_api.unshelve(context, instance)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unshelve')
return webob.Response(status_int=202)
class Shelve(exts.ExtensionDescriptor):
"""Instance shelve mode."""
name = "Shelve"
alias = "os-shelve"
namespace = "http://docs.openstack.org/compute/ext/shelve/api/v1.1"
updated = "2013-04-06T00:00:00Z"
def get_controller_extensions(self):
controller = ShelveController()
extension = exts.ControllerExtension(self, 'servers', controller)
return [extension]
|
MungoRae/home-assistant
|
refs/heads/dev
|
homeassistant/components/scene/wink.py
|
12
|
"""
Support for Wink scenes.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/scene.wink/
"""
import asyncio
import logging
from homeassistant.components.scene import Scene
from homeassistant.components.wink import WinkDevice, DOMAIN
DEPENDENCIES = ['wink']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Wink platform."""
import pywink
for scene in pywink.get_scenes():
_id = scene.object_id() + scene.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkScene(scene, hass)])
class WinkScene(WinkDevice, Scene):
"""Representation of a Wink shortcut/scene."""
def __init__(self, wink, hass):
"""Initialize the Wink device."""
super().__init__(wink, hass)
hass.data[DOMAIN]['entities']['scene'].append(self)
@asyncio.coroutine
def async_added_to_hass(self):
"""Callback when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['scene'].append(self)
@property
def is_on(self):
"""Python-wink will always return False."""
return self.wink.state()
def activate(self, **kwargs):
"""Activate the scene."""
self.wink.activate()
|
0k0k/scrapy
|
refs/heads/master
|
stock/stock/items.py
|
1
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class StockItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
COMPANY_ABBR = scrapy.Field()
COMPANY_CODE = scrapy.Field()
    STOCK_DATE = scrapy.Field()
    STOCK_TIME = scrapy.Field()
    STOCK_DAYSNAP = scrapy.Field()
    STOCK_DAYLINE = scrapy.Field()
|
jchevin/MissionPlanner-master
|
refs/heads/master
|
Lib/site-packages/scipy/linalg/_flinalg.py
|
53
|
import sys
if sys.platform == 'cli':
import clr
clr.AddReference("linalg")
from scipy__linalg___flinalg import *
|
carljm/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/unmigrated_app/models.py
|
282
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_tribble = models.ForeignKey("migrations.Tribble", models.CASCADE)
is_trouble = models.BooleanField(default=True)
|
QUANTAXIS/QUANTAXIS
|
refs/heads/master
|
QUANTAXIS/QAFetch/QAOKEx.py
|
2
|
# coding: utf-8
# Author: 阿财(Rgveda@github)(11652964@qq.com)
# Created date: 2020-02-27
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
OKEx api
See the full API documentation at: https://www.okex.com/docs/zh/#README
"""
import requests
import json
import datetime
import time
from dateutil.tz import tzutc
import pandas as pd
import numpy as np
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from requests.exceptions import ConnectTimeout, SSLError, ReadTimeout, ConnectionError
from retrying import retry
from urllib.parse import urljoin
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp,
)
from QUANTAXIS.QAUtil import (
QA_util_log_info,
)
TIMEOUT = 10
ILOVECHINA = "同学!!你知道什么叫做科学上网么? 如果你不知道的话,那么就加油吧!蓝灯,喵帕斯,VPS,阴阳师,v2ray,随便什么来一个!我翻墙我骄傲!"
OKEx_base_url = "https://www.okex.com/"
column_names = [
'time',
'open',
'high',
'low',
'close',
'volume',
]
"""
Mapping between QUANTAXIS and OKEx frequency constants
"""
OKEx2QA_FREQUENCY_DICT = {
"60": '1min',
"300": '5min',
"900": '15min',
"1800": '30min',
"3600": '60min',
"86400": 'day',
}
"""
OKEx returns at most 200 bars per request; if the requested time range is larger, only the latest 200 bars come back
"""
FREQUENCY_SHIFTING = {
"60": 12000,
"300": 60000,
"900": 180000,
"1800": 360000,
"3600": 720000,
"86400": 17280000
}
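# Note (added for clarity; not in the original file): each shift above is
# exactly 200 bars worth of seconds at that granularity -- the per-request
# maximum OKEx returns -- e.g. 200 * 60 == 12000 for "60" (1min) bars.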
def format_okex_data_fields(datas, symbol, frequency):
"""
# 归一化数据字段,转换填充必须字段,删除多余字段
参数名 类型 描述
time String 开始时间
open String 开盘价格
high String 最高价格
low String 最低价格
close String 收盘价格
volume String 交易量
"""
frame = pd.DataFrame(datas, columns=column_names)
frame['symbol'] = 'OKEX.{}'.format(symbol)
    # Convert the GMT+0 time string into a Unix timestamp (UTC)
frame['time_stamp'] = pd.to_datetime(frame['time']
).astype(np.int64) // 10**9
    # Convert UTC time to Beijing time
frame['datetime'] = pd.to_datetime(
frame['time_stamp'], unit='s'
).dt.tz_localize('UTC').dt.tz_convert('Asia/Shanghai')
frame['date'] = frame['datetime'].dt.strftime('%Y-%m-%d')
frame['date_stamp'] = pd.to_datetime(
frame['date']
).dt.tz_localize('Asia/Shanghai').astype(np.int64) // 10**9
frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
frame['created_at'] = int(
time.mktime(datetime.datetime.now().utctimetuple())
)
frame['updated_at'] = int(
time.mktime(datetime.datetime.now().utctimetuple())
)
frame.drop(['time'], axis=1, inplace=True)
frame['trade'] = 1
frame['amount'] = frame.apply(
lambda x: float(x['volume']) *
(float(x['open']) + float(x['close'])) / 2,
axis=1
)
if (frequency not in ['1day', 'day', '86400', '1d']):
frame['type'] = OKEx2QA_FREQUENCY_DICT[frequency]
return frame
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_okex_symbols():
"""
获取交易币对的列表,查询各币对的交易限制和价格步长等信息。
限速规则:20次/2s
HTTP请求 GET/api/spot/v3/instruments
"""
url = urljoin(OKEx_base_url, "/api/spot/v3/instruments")
retries = 1
datas = list()
while (retries != 0):
try:
req = requests.get(url, timeout=TIMEOUT)
retries = 0
except (ConnectTimeout, ConnectionError, SSLError, ReadTimeout):
retries = retries + 1
if (retries % 6 == 0):
print(ILOVECHINA)
print("Retry /api/spot/v3/instruments #{}".format(retries - 1))
time.sleep(0.5)
if (retries == 0):
        # Only process the data after a successful fetch; otherwise keep retrying
symbol_lists = json.loads(req.content)
if len(symbol_lists) == 0:
return []
for symbol in symbol_lists:
datas.append(symbol)
return datas
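# Usage sketch (added for illustration; 'instrument_id' is the field name
# documented by the OKEx v3 spot API):
#
#     for s in QA_fetch_okex_symbols():
#         print(s['instrument_id'])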
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_okex_kline_with_auto_retry(
symbol,
start_time,
end_time,
frequency,
):
"""
Get the latest symbol‘s candlestick data raw method
获取币对的K线数据。K线数据按请求的粒度分组返回,k线数据最多可获取200条(说明文档中2000条系错误)。
限速规则:20次/2s
HTTP请求 GET/api/spot/v3/instruments/<instrument_id>/candles
"""
url = urljoin(
OKEx_base_url,
"/api/spot/v3/instruments/{:s}/candles".format(symbol)
)
retries = 1
while (retries != 0):
try:
start_epoch = datetime.datetime.fromtimestamp(
start_time,
tz=tzutc()
)
end_epoch = datetime.datetime.fromtimestamp(end_time, tz=tzutc())
req = requests.get(
url,
params={
"granularity": frequency,
"start": start_epoch.isoformat().replace("+00:00", "Z"), # Z结尾的ISO时间 String
"end": end_epoch.isoformat() .replace("+00:00", "Z") # Z结尾的ISO时间 String
},
timeout=TIMEOUT
)
            # Sleep briefly so we are not disconnected for polling too fast
time.sleep(0.5)
retries = 0
except (ConnectTimeout, ConnectionError, SSLError, ReadTimeout):
retries = retries + 1
if (retries % 6 == 0):
print(ILOVECHINA)
print("Retry /api/spot/v3/instruments #{}".format(retries - 1))
time.sleep(0.5)
if (retries == 0):
        # Only process the data after a successful fetch; otherwise keep retrying
msg_dict = json.loads(req.content)
if ('error_code' in msg_dict):
print('Error', msg_dict)
return None
return msg_dict
return None
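# Usage sketch (added for illustration; 'BTC-USDT' is just an example
# instrument id): a single raw request for the most recent 200 one-minute
# bars, with Unix-epoch-second timestamps.
#
#     now = int(time.time())
#     bars = QA_fetch_okex_kline_with_auto_retry('BTC-USDT', now - 12000, now, '60')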
def QA_fetch_okex_kline(
symbol,
start_time,
end_time,
frequency,
callback_func=None
):
"""
Get the latest symbol‘s candlestick data
时间倒序切片获取算法,是各大交易所获取1min数据的神器,因为大部分交易所直接请求跨月跨年的1min分钟数据
会直接返回空值,只有将 start_epoch,end_epoch 切片细分到 200/300 bar 以内,才能正确返回 kline,
火币和binance,OKEx 均为如此,直接用跨年时间去直接请求上万bar 的 kline 数据永远只返回最近200条数据。
"""
datas = list()
reqParams = {}
reqParams['from'] = end_time - FREQUENCY_SHIFTING[frequency]
reqParams['to'] = end_time
while (reqParams['to'] > start_time):
if ((reqParams['from'] > QA_util_datetime_to_Unix_timestamp())) or \
((reqParams['from'] > reqParams['to'])):
            # A "future" timestamp usually means a wrong default timezone
            # setting, or an error while rolling the time window backwards
            QA_util_log_info(
                'An unexpected \'future\' timestamp was received. Please check the \'tzlocalize\' setting of self.missing_data_list_func. More info: {:s}@{:s} at {:s} but current time is {}'
.format(
symbol,
frequency,
QA_util_print_timestamp(reqParams['from']),
QA_util_print_timestamp(
QA_util_datetime_to_Unix_timestamp()
)
)
)
            # Skip to the next time window
reqParams['to'] = int(reqParams['from'] - 1)
reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])
continue
klines = QA_fetch_okex_kline_with_auto_retry(
symbol,
reqParams['from'],
reqParams['to'],
frequency,
)
if (klines is None) or \
(len(klines) == 0) or \
('error' in klines):
            # Give up on error
break
reqParams['to'] = int(reqParams['from'] - 1)
reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])
if (klines is None) or \
((len(datas) > 0) and (klines[-1][0] == datas[-1][0])):
            # No more data
break
datas.extend(klines)
if (callback_func is not None):
frame = format_okex_data_fields(klines, symbol, frequency)
callback_func(frame, OKEx2QA_FREQUENCY_DICT[frequency])
if len(datas) == 0:
return None
    # Normalize the data fields: convert and fill required fields, drop extras
frame = format_okex_data_fields(datas, symbol, frequency)
return frame
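# Usage sketch (added for illustration; names are hypothetical): pull one day
# of 1min BTC-USDT bars, persisting each 200-bar slice through a callback.
#
#     def save_frame(frame, freq):
#         print(freq, len(frame))
#
#     end = int(time.time())
#     data = QA_fetch_okex_kline('BTC-USDT', end - 86400, end, '60',
#                                callback_func=save_frame)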
def QA_fetch_okex_kline_min(
symbol,
start_time,
end_time,
frequency,
callback_func=None
):
"""
Get the latest symbol‘s candlestick data with time slices
时间倒序切片获取算法,是各大交易所获取1min数据的神器,因为大部分交易所直接请求跨月跨年的1min分钟数据
会直接返回空值,只有将 start_epoch,end_epoch 切片细分到 200/300 bar 以内,才能正确返回 kline,
火币和binance,OKEx 均为如此,用上面那个函数的方式去直接请求上万bar 的分钟 kline 数据是不会有结果的。
"""
reqParams = {}
reqParams['from'] = end_time - FREQUENCY_SHIFTING[frequency]
reqParams['to'] = end_time
datas = list()
while (reqParams['to'] > start_time):
if ((reqParams['from'] > QA_util_datetime_to_Unix_timestamp())) or \
((reqParams['from'] > reqParams['to'])):
            # A "future" timestamp usually means a wrong default timezone
            # setting, or an error while rolling the time window backwards
            QA_util_log_info(
                'An unexpected \'future\' timestamp was received. Please check the \'tzlocalize\' setting of self.missing_data_list_func. More info: {:s}@{:s} at {:s} but current time is {}'
.format(
symbol,
frequency,
QA_util_print_timestamp(reqParams['from']),
QA_util_print_timestamp(
QA_util_datetime_to_Unix_timestamp()
)
)
)
            # Skip to the next time window
reqParams['to'] = int(reqParams['from'] - 1)
reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])
continue
klines = QA_fetch_okex_kline_with_auto_retry(
symbol,
reqParams['from'],
reqParams['to'],
frequency,
)
if (klines is None) or \
(len(klines) == 0) or \
('error' in klines):
            # Give up on error
break
reqParams['to'] = int(reqParams['from'] - 1)
reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])
if (callback_func is not None):
frame = format_okex_data_fields(klines, symbol, frequency)
callback_func(frame, OKEx2QA_FREQUENCY_DICT[frequency])
if (len(klines) == 0):
return None
if __name__ == '__main__':
# url = urljoin(OKEx_base_url, "/api/v1/exchangeInfo")
# print(url)
# a = requests.get(url)
# print(a.content)
# print(json.loads(a.content))
pass
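    # Illustrative demo (added; not in the original file): fetch the latest
    # hour of 1min BTC-USDT bars and print the normalized DataFrame.
    #
    #     end = int(time.time())
    #     print(QA_fetch_okex_kline('BTC-USDT', end - 3600, end, '60'))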
|
kisna72/django
|
refs/heads/master
|
django/contrib/staticfiles/management/__init__.py
|
12133432
| |
dracos/django
|
refs/heads/master
|
tests/admin_scripts/simple_app/management/commands/__init__.py
|
12133432
| |
SinnerSchraderMobileMirrors/django-cms
|
refs/heads/develop
|
cms/test_utils/util/__init__.py
|
12133432
| |