| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
fdvarela/odoo8 | addons/l10n_it/__init__.py | 447 | 1161 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
bluevoda/BloggyBlog | lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
HZ_cls = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
HZ_st = (
eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17
5,eError, 6,eError, 5, 5, 4,eError,# 18-1f
4,eError, 4, 4, 4,eError, 4,eError,# 20-27
4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
HZCharLenTable = (0, 0, 0, 0, 0, 0)
HZSMModel = {'classTable': HZ_cls,
'classFactor': 6,
'stateTable': HZ_st,
'charLenTable': HZCharLenTable,
'name': "HZ-GB-2312"}
ISO2022CN_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
5, 6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
'classFactor': 9,
'stateTable': ISO2022CN_st,
'charLenTable': ISO2022CNCharLenTable,
'name': "ISO-2022-CN"}
ISO2022JP_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError, 5,eError,eError,eError, 4,eError,eError,# 20-27
eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
'classFactor': 10,
'stateTable': ISO2022JP_st,
'charLenTable': ISO2022JPCharLenTable,
'name': "ISO-2022-JP"}
ISO2022KR_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_st = (
eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17
eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
'classFactor': 6,
'stateTable': ISO2022KR_st,
'charLenTable': ISO2022KRCharLenTable,
'name': "ISO-2022-KR"}
# flake8: noqa
| gpl-3.0 |
Szkered/PACTT | authentication/views.py | 1 | 2427 | from rest_framework import permissions, viewsets, status, views
from rest_framework.response import Response
from authentication.models import Account
from authentication.permissions import IsAccountOwner
from authentication.serializers import AccountSerializer
import json
from django.contrib.auth import authenticate, login, logout
class AccountViewSet(viewsets.ModelViewSet):
lookup_field = 'username'
queryset = Account.objects.all()
serializer_class = AccountSerializer
def get_permissions(self):
if self.request.method in permissions.SAFE_METHODS:
return (permissions.AllowAny(),)
if self.request.method == 'POST':
return (permissions.AllowAny(),)
return (permissions.IsAuthenticated(), IsAccountOwner(),)
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
Account.objects.create_user(**serializer.validated_data)
return Response(serializer.validated_data, status=status.HTTP_201_CREATED)
return Response({
'status': 'Bad request',
'message': 'Account could not be created with received data.'
}, status=status.HTTP_400_BAD_REQUEST)
class LoginView(views.APIView):
def post(self, request, format=None):
data = json.loads(request.body)
email = data.get('email', None)
password = data.get('password', None)
account = authenticate(email=email, password=password)
if account is not None:
if account.is_active:
login(request, account)
serialized = AccountSerializer(account)
return Response(serialized.data)
else:
return Response({
'status': 'Unauthorized',
'message': 'This account has been disabled.'
}, status=status.HTTP_401_UNAUTHORIZED)
else:
return Response({
'status': 'Unauthorized',
'message': 'Username/password combination invalid.'
}, status=status.HTTP_401_UNAUTHORIZED)
class LogoutView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request, format=None):
logout(request)
return Response({}, status=status.HTTP_204_NO_CONTENT)
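# Hedged wiring sketch (the URL prefixes and urls-module context are
# assumptions, not part of this file): in a urls.py these views could be
# exposed as
#
#     from rest_framework import routers
#     router = routers.SimpleRouter()
#     router.register(r'accounts', AccountViewSet)
#     urlpatterns = router.urls + [
#         url(r'^auth/login/$', LoginView.as_view()),
#         url(r'^auth/logout/$', LogoutView.as_view()),
#     ]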
| mit |
klim-/pyplane | core/Toolbar.py | 1 | 1723 | # -*- coding: utf-8 -*-
# Copyright (C) 2013
# by Klemens Fritzsche, pyplane@leckstrom.de
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Klemens Fritzsche'
from PyQt4 import QtGui
from matplotlib.backend_bases import NavigationToolbar2 as NavigationToolbar
from matplotlib.backends.backend_qt4 import cursord
class Toolbar(NavigationToolbar):
"""
This class hides the functionality of NavigationToolbar, and only
provides the necessary functions (only zooming at the moment)
"""
def _init_toolbar(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
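        # Matplotlib's bbox origin is bottom-left while Qt paints from the
        # top-left, so flip the y coordinates against the canvas height.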
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle(rect)
def set_cursor(self, cursor):
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(cursord[cursor]))
if __package__ is None:
__package__ = "core.toolbar"
| gpl-3.0 |
sbellem/django | django/core/management/commands/diffsettings.py | 479 | 1565 | from django.core.management.base import BaseCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"""Converts a module namespace to a Python dictionary."""
return {k: repr(v) for k, v in module.__dict__.items() if not omittable(k)}
class Command(BaseCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('--all', action='store_true', dest='all', default=False,
help='Display all settings, regardless of their value. '
'Default values are prefixed by "###".')
def handle(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
for key in sorted(user_settings):
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
elif options['all']:
output.append("### %s = %s" % (key, user_settings[key]))
return '\n'.join(output)
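# Example invocation (standard manage.py entry point assumed):
#     python manage.py diffsettings          # only settings that differ
#     python manage.py diffsettings --all    # all settings; defaults marked "###"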
| bsd-3-clause |
shineyear/catawampus | tr/vendor/bup/cmd/list-idx-cmd.py | 10 | 1314 | #!/usr/bin/env python
import sys, os
from bup import git, options
from bup.helpers import *
optspec = """
bup list-idx [--find=<prefix>] <idxfilenames...>
--
find= display only objects that start with <prefix>
"""
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
handle_ctrl_c()
opt.find = opt.find or ''
if not extra:
o.fatal('you must provide at least one filename')
if len(opt.find) > 40:
o.fatal('--find parameter must be <= 40 chars long')
else:
if len(opt.find) % 2:
s = opt.find + '0'
else:
s = opt.find
try:
bin = s.decode('hex')
except TypeError:
o.fatal('--find parameter is not a valid hex string')
find = opt.find.lower()
count = 0
for name in extra:
try:
ix = git.open_idx(name)
    except git.GitError as e:
add_error('%s: %s' % (name, e))
continue
if len(opt.find) == 40:
if ix.exists(bin):
print name, find
else:
# slow, exhaustive search
for _i in ix:
i = str(_i).encode('hex')
if i.startswith(find):
print name, i
qprogress('Searching: %d\r' % count)
count += 1
if saved_errors:
    log('WARNING: %d errors encountered while listing.\n' % len(saved_errors))
sys.exit(1)
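# Example invocation (paths are illustrative; the flags come from the
# optspec above):
#     bup list-idx --find=1a2b /path/to/objects/pack/pack-*.idx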
| apache-2.0 |
BenjamenMeyer/stackInABox | stackinabox/util/responses/decorator.py | 2 | 4117 | """
Stack-In-A-Box: Responses Support via decorator
"""
try:
import collections.abc as collections
except ImportError:
# Py2.7 Support
import collections
import functools
import logging
import re
import types
import responses
import six
from stackinabox.services.service import StackInABoxService
from stackinabox.stack import StackInABox
from stackinabox.util.responses.core import (
responses_registration
)
from stackinabox.util import deprecator
from stackinabox.util.tools import CaseInsensitiveDict
logger = logging.getLogger(__name__)
class activate(object):
"""
    Decorator class that makes using Responses with Stack-In-A-Box
    extremely simple.
"""
def __init__(self, uri, *args, **kwargs):
"""
Initialize the decorator instance
:param uri: URI Stack-In-A-Box will use to recognize the HTTP calls
            e.g. 'localhost'.
:param text_type access_services: name of a keyword parameter in the
test function to assign access to the services created in the
arguments to the decorator.
:param args: A tuple containing all the positional arguments. Any
StackInABoxService arguments are removed before being passed to
the actual function.
:param kwargs: A dictionary of keyword args that are passed to the
actual function.
"""
self.uri = uri
self.services = {}
self.args = []
self.kwargs = kwargs
if "access_services" in self.kwargs:
self.enable_service_access = self.kwargs["access_services"]
del self.kwargs["access_services"]
else:
self.enable_service_access = None
for arg in args:
if self.process_service(arg, raise_on_type=False):
pass
elif (
isinstance(arg, types.GeneratorType) or
isinstance(arg, collections.Iterable)
):
for sub_arg in arg:
self.process_service(sub_arg, raise_on_type=True)
else:
self.args.append(arg)
def process_service(self, arg_based_service, raise_on_type=True):
if isinstance(arg_based_service, StackInABoxService):
logger.debug("Registering {0}".format(arg_based_service.name))
self.services[arg_based_service.name] = arg_based_service
return True
elif raise_on_type:
raise TypeError(
"Generator or Iterable must provide a "
"StackInABoxService in all of its results."
)
return False
def __call__(self, fn):
"""
Call to actually wrap the function call.
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs):
args_copy = list(args)
for arg in self.args:
args_copy.append(arg)
args_finalized = tuple(args_copy)
kwargs.update(self.kwargs)
if self.enable_service_access is not None:
kwargs[self.enable_service_access] = self.services
            def run():
                responses.mock.start()
                StackInABox.reset_services()
                for service in self.services.values():
                    StackInABox.register_service(service)
                responses_registration(self.uri)
                result = fn(*args_finalized, **kwargs)
                StackInABox.reset_services()
                responses.mock.stop()
                responses.mock.reset()
                # run() must hand the result back; assigning to an outer
                # name from inside this nested function would only create a
                # local, and the decorator would always return None.
                return result
            with responses.RequestsMock():
                return_value = run()
            return return_value
return wrapped
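# Hedged usage sketch (the service class, endpoint, and use of requests are
# assumptions; the 'localhost' uri and StackInABoxService positional args
# follow the docstring above):
#
#     @activate('localhost', MyService())
#     def test_my_service():
#         response = requests.get('http://localhost/my-service/hello')
#         assert response.status_code == 200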
class stack_activate(activate):
@deprecator.DeprecatedInterface("stack_activate", "activate")
def __init__(self, *args, **kwargs):
super(stack_activate, self).__init__(*args, **kwargs)
@deprecator.DeprecatedInterface("stack_activate", "activate")
def __call__(self, *args, **kwargs):
        return super(stack_activate, self).__call__(*args, **kwargs)
| apache-2.0 |
VitalPet/c2c-rd-addons | chricar_application_columns/application_columns.py | 4 | 4788 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################
#
# ChriCar Beteiligungs- und Beratungs- GmbH
# Copyright (C) ChriCar Beteiligungs- und Beratungs- GmbH
# all rights reserved
# created 2009-03-27 16:28:26+01
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################
import time
from openerp.osv import fields,osv
#import pooler
class chricar_application_columns(osv.osv):
_name = "chricar.application_columns"
_columns = {
'application_tables_id': fields.many2one('chricar.application_tables','Source Table Name', select=True, required=True),
'char_size' : fields.float ('Size', digits=(4,0)),
'column_fk' : fields.char ('Column FK', size=64),
'column_list_show' : fields.boolean ('List Show', required=True),
'defaults' : fields.char ('Defaults', size=256),
'extra_tab' : fields.boolean ('Extra Tab', required=True),
'help' : fields.char ('Help', size=256),
'inherits_columns' : fields.boolean ('Inherits Columns', required=True),
'is_name' : fields.boolean ('is Name', required=True),
'many2one' : fields.boolean ('Many2one', required=True),
'migrate' : fields.boolean ('Migrate', required=True),
'name_column' : fields.char ('Column Name Display', size=64, required=True),
'name_column_source' : fields.char ('Source Column Name', size=64, required=True),
'name' : fields.char ('Column Name', size=64, required=True),
'num_precision' : fields.float ('Num Precision', digits=(4,0)),
'num_scale' : fields.float ('Num Scale', digits=(4,0)),
'one2many_col_name' : fields.char ('One2many Col Name', size=64),
'read_only' : fields.boolean ('Read Only', required=True),
'required' : fields.boolean ('Required', required=True),
'search' : fields.char ('Search', size=8),
'sequence' : fields.float ('Sequence', digits=(4,0)),
'sort_form' : fields.float ('Form Sort', digits=(4,0)),
'sort_list' : fields.integer ('List Sort'),
'source_pk' : fields.float ('Source PK', digits=(4,0)),
'state' : fields.char ('State', size=16),
'suppress_in_form' : fields.boolean ('Form Suppress', required=True),
'table_fk' : fields.char ('Table FK', size=64),
'table_fk_source' : fields.char ('Source Table Foreign Key', size=64),
'tiny_column_english': fields.char ('Column Name English', size=64),
'translate' : fields.boolean ('Translate', required=True),
'type' : fields.char ('Type', size=16, required=True),
}
_defaults = {
'column_list_show' : lambda *a: True,
'extra_tab' : lambda *a: False,
'inherits_columns' : lambda *a: False,
'is_name' : lambda *a: False,
'many2one' : lambda *a: False,
'migrate' : lambda *a: True,
'read_only' : lambda *a: False,
'required' : lambda *a: False,
'suppress_in_form' : lambda *a: False,
'translate' : lambda *a: False,
}
chricar_application_columns()
class chricar_application_tables(osv.osv):
_inherit = "chricar.application_tables"
_columns = {
'application_columns_ids': fields.one2many('chricar.application_columns','application_tables_id','Application Columns'),
}
chricar_application_tables()
| agpl-3.0 |
drewwestrick/Repy-Web-Server | seattle_repy/nmresourcemath.py | 2 | 7422 | """
Author: Justin Cappos
Module: Node Manager resource math. Helper routines to figure out how to add
two vessels together and divide a vessel into two others.
Start date: September 5th 2008
The design goals of this version are to be secure, simple, and reliable (in
that order).
This is where we worry about the offcut resources...
"""
# need to know what resources are supported
from nanny import known_resources
# need to know what resources are supported
from nanny import individual_item_resources
# need to know what resources are required - Brent
from nanny import must_assign_resources
# NOTE: Should I move all of this into nanny and restrictions? Should I
# have restrictions and nanny call this instead?
# Duplication is bad, but I'm not sure how best to refactor.
from restrictions import get_rule
from restrictions import known_calls
from restrictions import valid_actions
# What we throw when getting an invalid resource / restriction file
class ResourceParseError(Exception):
pass
# reads a restrictions file (tossing the non-resource lines and returning a
# dict of the resources)
def read_resources_from_file(filename):
retdict = {}
for individual_item_resource in individual_item_resources:
retdict[individual_item_resource] = set()
# much of this is adopted from restrictions.py. If you find bugs here,
# check there as well
for line in open(filename):
# remove any comments
noncommentline = line.split('#')[0]
tokenlist = noncommentline.split()
if len(tokenlist) == 0:
# This was a blank or comment line
continue
# should be either a resource or a call line
if tokenlist[0] != 'resource' and tokenlist[0] != 'call':
raise ResourceParseError, "Line '"+line+"' not understood in file '"+filename+"'"
# don't care about calls for this.
if tokenlist[0] == 'call':
continue
####### Okay, it's a resource. It must have two other tokens!
if len(tokenlist) != 3:
raise ResourceParseError, "Line '"+line+"' has wrong number of items in '"+filename+"'"
# and the second token must be a known resource
if tokenlist[1] not in known_resources:
raise ResourceParseError, "Line '"+line+"' has an unknown resource '"+tokenlist[1]+"' in '"+filename+"'"
# and the last item should be a valid float
try:
float(tokenlist[2])
except ValueError:
raise ResourceParseError, "Line '"+line+"' has an invalid resource value '"+tokenlist[2]+"' in '"+filename+"'"
# let's handle individual resources now...
if tokenlist[1] in individual_item_resources:
retdict[tokenlist[1]].add(float(tokenlist[2]))
continue
# non individual resources should not have been previously assigned
if tokenlist[1] in retdict:
raise ResourceParseError, "Line '"+line+"' has a duplicate resource rule for '"+tokenlist[1]+"' in '"+filename+"'"
# Finally, we assign it to the dictionary
retdict[tokenlist[1]] = float(tokenlist[2])
return retdict
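# Example of the resource lines this parser accepts (values are illustrative;
# resource names must appear in nanny.known_resources, and individual item
# resources such as ports may be listed multiple times):
#
#     resource cpu .10
#     resource memory 15000000
#     resource messport 12345
#     resource messport 12346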
# reads a restrictions file (tossing the resource lines and returning a
# string with all of the restrictions data)
def read_restrictionsstring_from_data(restrictionsdata):
retstring = ''
for line in restrictionsdata.split('\n'):
# remove any comments
noncommentline = line.split('#')[0]
tokenlist = noncommentline.split()
# if len(tokenlist) == 0:
# # This was a blank or comment line
# continue
# append call lines
if len(tokenlist) == 0 or tokenlist[0] != 'resource':
retstring = retstring + line+'\n'
#Ignore resource lines, etc.
return retstring
def write_resource_dict(resourcedict, filename):
outfo = open(filename,"w")
for resource in resourcedict:
if type(resourcedict[resource]) == set:
for item in resourcedict[resource]:
        outfo.write("resource "+resource+" "+str(item)+"\n")
else:
      outfo.write("resource "+resource+" "+str(resourcedict[resource])+"\n")
outfo.close()
def check_for_negative_resources(newdict):
for resource in newdict:
if type(newdict[resource]) != set and newdict[resource] < 0.0:
raise ResourceParseError, "Insufficient quantity: Resource '"+resource+"' has a negative quantity"
# Helper method to ensure that the given resource dict has all of the resources
# listed as required in nanny.py. -Brent
def check_for_required_resources(newdict):
for resource in must_assign_resources:
if resource not in newdict:
raise ResourceParseError("Missing required resource: '"+resource+"'")
def add(dict1, dict2):
retdict = dict1.copy()
# then look at resourcefile1
for resource in dict2:
# if this is a set, then get the union
if type(retdict[resource]) == set:
retdict[resource] = retdict[resource].union(dict2[resource])
continue
# empty if not preexisting
if resource not in retdict:
retdict[resource] = 0.0
# ... and add this item to what we have
retdict[resource] = retdict[resource] + dict2[resource]
return retdict
def subtract(dict1, dict2):
retdict = dict1.copy()
# then look at resourcefile1
for resource in dict2:
# empty if not preexisting
if resource not in retdict:
retdict[resource] = 0.0
# ... and add this item to what we have
retdict[resource] = retdict[resource] - dict2[resource]
return retdict
# add vessels together
def combine(resourcefilename1, resourcefilename2, offcutfilename, newfilename):
# first, read in the files.
offcutresourcedict = read_resources_from_file(offcutfilename)
resourcefile1dict = read_resources_from_file(resourcefilename1)
resourcefile2dict = read_resources_from_file(resourcefilename2)
  # combine the vessels
tempdict = add(resourcefile1dict, resourcefile2dict)
# add in the offcut resources
newdict = add(offcutresourcedict, tempdict)
# ensure there aren't negative resources here (how could there be?)
check_for_negative_resources(newdict)
# ensure the required resource limits are included.
# (how could this not be?) -Brent
check_for_required_resources(newdict)
# okay, now write out the new file...
write_resource_dict(newdict, newfilename)
# split a vessel
def split(resourcefilename1, resourcefilename2, offcutfilename, newfilename):
# first, read in the files.
offcutresourcedict = read_resources_from_file(offcutfilename)
resourcefile1dict = read_resources_from_file(resourcefilename1)
resourcefile2dict = read_resources_from_file(resourcefilename2)
check_for_negative_resources(resourcefile1dict)
check_for_negative_resources(resourcefile2dict)
# Check to ensure that both resource files have values for all the
# required resources. Without this check, the node manager runs into
# errors when trying to enforce the limits on the required resources
# -Brent
check_for_required_resources(resourcefile1dict)
check_for_required_resources(resourcefile2dict)
# subtract the vessels
tempdict = subtract(resourcefile1dict, resourcefile2dict)
# add in the offcut resources
newdict = subtract(tempdict, offcutresourcedict)
# ensure there aren't negative resources
check_for_negative_resources(newdict)
# Ensure that all the required resource limits are included -Brent
check_for_required_resources(newdict)
# okay, now write out the new file...
write_resource_dict(newdict, newfilename)
| gpl-2.0 |
Tranzystorek/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_tokenizer.py | 420 | 6544 | from __future__ import absolute_import, division, unicode_literals
import json
import warnings
import re
from .support import get_data_files
from html5lib.tokenizer import HTMLTokenizer
from html5lib import constants
class TokenizerTestParser(object):
def __init__(self, initialState, lastStartTag=None):
self.tokenizer = HTMLTokenizer
self._state = initialState
self._lastStartTag = lastStartTag
def parse(self, stream, encoding=None, innerHTML=False):
tokenizer = self.tokenizer(stream, encoding)
self.outputTokens = []
tokenizer.state = getattr(tokenizer, self._state)
if self._lastStartTag is not None:
tokenizer.currentToken = {"type": "startTag",
"name": self._lastStartTag}
types = dict((v, k) for k, v in constants.tokenTypes.items())
for token in tokenizer:
getattr(self, 'process%s' % types[token["type"]])(token)
return self.outputTokens
def processDoctype(self, token):
self.outputTokens.append(["DOCTYPE", token["name"], token["publicId"],
token["systemId"], token["correct"]])
def processStartTag(self, token):
self.outputTokens.append(["StartTag", token["name"],
dict(token["data"][::-1]), token["selfClosing"]])
def processEmptyTag(self, token):
if token["name"] not in constants.voidElements:
self.outputTokens.append("ParseError")
self.outputTokens.append(["StartTag", token["name"], dict(token["data"][::-1])])
def processEndTag(self, token):
self.outputTokens.append(["EndTag", token["name"],
token["selfClosing"]])
def processComment(self, token):
self.outputTokens.append(["Comment", token["data"]])
def processSpaceCharacters(self, token):
self.outputTokens.append(["Character", token["data"]])
self.processSpaceCharacters = self.processCharacters
def processCharacters(self, token):
self.outputTokens.append(["Character", token["data"]])
def processEOF(self, token):
pass
def processParseError(self, token):
self.outputTokens.append(["ParseError", token["data"]])
def concatenateCharacterTokens(tokens):
outputTokens = []
for token in tokens:
if "ParseError" not in token and token[0] == "Character":
if (outputTokens and "ParseError" not in outputTokens[-1] and
outputTokens[-1][0] == "Character"):
outputTokens[-1][1] += token[1]
else:
outputTokens.append(token)
else:
outputTokens.append(token)
return outputTokens
def normalizeTokens(tokens):
# TODO: convert tests to reflect arrays
for i, token in enumerate(tokens):
if token[0] == 'ParseError':
tokens[i] = token[0]
return tokens
def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
ignoreErrors=False):
"""Test whether the test has passed or failed
If the ignoreErrorOrder flag is set to true we don't test the relative
    positions of parse errors and non-parse errors.
"""
checkSelfClosing = False
for token in expectedTokens:
if (token[0] == "StartTag" and len(token) == 4
or token[0] == "EndTag" and len(token) == 3):
checkSelfClosing = True
break
if not checkSelfClosing:
for token in receivedTokens:
if token[0] == "StartTag" or token[0] == "EndTag":
token.pop()
if not ignoreErrorOrder and not ignoreErrors:
return expectedTokens == receivedTokens
else:
# Sort the tokens into two groups; non-parse errors and parse errors
tokens = {"expected": [[], []], "received": [[], []]}
for tokenType, tokenList in zip(list(tokens.keys()),
(expectedTokens, receivedTokens)):
for token in tokenList:
if token != "ParseError":
tokens[tokenType][0].append(token)
else:
if not ignoreErrors:
tokens[tokenType][1].append(token)
return tokens["expected"] == tokens["received"]
def unescape(test):
def decode(inp):
return inp.encode("utf-8").decode("unicode-escape")
test["input"] = decode(test["input"])
for token in test["output"]:
if token == "ParseError":
continue
else:
token[1] = decode(token[1])
if len(token) > 2:
                # Iterate over a copy of the attribute items: the dict is
                # mutated in the loop as keys are re-inserted decoded.
                for key, value in list(token[2].items()):
                    del token[2][key]
                    token[2][decode(key)] = decode(value)
return test
def runTokenizerTest(test):
warnings.resetwarnings()
warnings.simplefilter("error")
expected = concatenateCharacterTokens(test['output'])
if 'lastStartTag' not in test:
test['lastStartTag'] = None
parser = TokenizerTestParser(test['initialState'],
test['lastStartTag'])
tokens = parser.parse(test['input'])
tokens = concatenateCharacterTokens(tokens)
received = normalizeTokens(tokens)
errorMsg = "\n".join(["\n\nInitial state:",
test['initialState'],
"\nInput:", test['input'],
"\nExpected:", repr(expected),
"\nreceived:", repr(tokens)])
ignoreErrorOrder = test.get('ignoreErrorOrder', False)
assert tokensMatch(expected, received, ignoreErrorOrder, True), errorMsg
def _doCapitalize(match):
return match.group(1).upper()
_capitalizeRe = re.compile(r"\W+(\w)").sub
def capitalize(s):
s = s.lower()
s = _capitalizeRe(_doCapitalize, s)
return s
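# e.g. capitalize("Data state") -> "dataState", matching HTMLTokenizer's
# state-method names such as tokenizer.dataState.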
def testTokenizer():
for filename in get_data_files('tokenizer', '*.test'):
with open(filename) as fp:
tests = json.load(fp)
if 'tests' in tests:
for index, test in enumerate(tests['tests']):
if 'initialStates' not in test:
test["initialStates"] = ["Data state"]
if 'doubleEscaped' in test:
test = unescape(test)
for initialState in test["initialStates"]:
test["initialState"] = capitalize(initialState)
yield runTokenizerTest, test
| mpl-2.0 |
oandrew/home-assistant | homeassistant/components/climate/mysensors.py | 10 | 7483 | """
mysensors platform that offers a Climate(MySensors-HVAC) component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.mysensors
"""
import logging
from homeassistant.components import mysensors
from homeassistant.components.climate import (
STATE_COOL, STATE_HEAT, STATE_OFF, STATE_AUTO, ClimateDevice,
ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW)
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT, ATTR_TEMPERATURE
_LOGGER = logging.getLogger(__name__)
DICT_HA_TO_MYS = {STATE_COOL: "CoolOn", STATE_HEAT: "HeatOn",
STATE_AUTO: "AutoChangeOver", STATE_OFF: "Off"}
DICT_MYS_TO_HA = {"CoolOn": STATE_COOL, "HeatOn": STATE_HEAT,
"AutoChangeOver": STATE_AUTO, "Off": STATE_OFF}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the mysensors climate."""
if discovery_info is None:
return
gateways = hass.data.get(mysensors.MYSENSORS_GATEWAYS)
if not gateways:
return
for gateway in gateways:
if float(gateway.protocol_version) < 1.5:
continue
pres = gateway.const.Presentation
set_req = gateway.const.SetReq
map_sv_types = {
pres.S_HVAC: [set_req.V_HVAC_FLOW_STATE],
}
devices = {}
gateway.platform_callbacks.append(mysensors.pf_callback_factory(
map_sv_types, devices, add_devices, MySensorsHVAC))
class MySensorsHVAC(mysensors.MySensorsDeviceEntity, ClimateDevice):
"""Representation of a MySensorsHVAC hvac."""
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return self.gateway.optimistic
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return (TEMP_CELSIUS
if self.gateway.metric else TEMP_FAHRENHEIT)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._values.get(self.gateway.const.SetReq.V_TEMP)
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_COOL in self._values and \
set_req.V_HVAC_SETPOINT_HEAT in self._values:
return None
temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
if temp is None:
temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
return temp
@property
def target_temperature_high(self):
"""Return the highbound target temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_HEAT in self._values:
return self._values.get(set_req.V_HVAC_SETPOINT_COOL)
@property
def target_temperature_low(self):
"""Return the lowbound target temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_COOL in self._values:
return self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self._values.get(self.gateway.const.SetReq.V_HVAC_FLOW_STATE)
@property
def operation_list(self):
"""List of available operation modes."""
return [STATE_OFF, STATE_AUTO, STATE_COOL, STATE_HEAT]
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self._values.get(self.gateway.const.SetReq.V_HVAC_SPEED)
@property
def fan_list(self):
"""List of available fan modes."""
return ["Auto", "Min", "Normal", "Max"]
def set_temperature(self, **kwargs):
"""Set new target temperature."""
set_req = self.gateway.const.SetReq
temp = kwargs.get(ATTR_TEMPERATURE)
low = kwargs.get(ATTR_TARGET_TEMP_LOW)
high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
heat = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
cool = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
updates = ()
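        # Single-setpoint devices map ATTR_TEMPERATURE onto whichever
        # setpoint (heat or cool) the node reports; dual-setpoint devices
        # take both the low and high bounds instead.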
if temp is not None:
if heat is not None:
# Set HEAT Target temperature
value_type = set_req.V_HVAC_SETPOINT_HEAT
elif cool is not None:
# Set COOL Target temperature
value_type = set_req.V_HVAC_SETPOINT_COOL
if heat is not None or cool is not None:
updates = [(value_type, temp)]
elif all(val is not None for val in (low, high, heat, cool)):
updates = [
(set_req.V_HVAC_SETPOINT_HEAT, low),
(set_req.V_HVAC_SETPOINT_COOL, high)]
for value_type, value in updates:
self.gateway.set_child_value(
self.node_id, self.child_id, value_type, value)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[value_type] = value
self.update_ha_state()
def set_fan_mode(self, fan):
"""Set new target temperature."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(self.node_id, self.child_id,
set_req.V_HVAC_SPEED, fan)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[set_req.V_HVAC_SPEED] = fan
self.update_ha_state()
def set_operation_mode(self, operation_mode):
"""Set new target temperature."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(self.node_id, self.child_id,
set_req.V_HVAC_FLOW_STATE,
DICT_HA_TO_MYS[operation_mode])
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[set_req.V_HVAC_FLOW_STATE] = operation_mode
self.update_ha_state()
def update(self):
"""Update the controller with the latest value from a sensor."""
set_req = self.gateway.const.SetReq
node = self.gateway.sensors[self.node_id]
child = node.children[self.child_id]
for value_type, value in child.values.items():
_LOGGER.debug(
'%s: value_type %s, value = %s', self._name, value_type, value)
if value_type == set_req.V_HVAC_FLOW_STATE:
self._values[value_type] = DICT_MYS_TO_HA[value]
else:
self._values[value_type] = value
def set_humidity(self, humidity):
"""Set new target humidity."""
_LOGGER.error("Service Not Implemented yet")
def set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
_LOGGER.error("Service Not Implemented yet")
def turn_away_mode_on(self):
"""Turn away mode on."""
_LOGGER.error("Service Not Implemented yet")
def turn_away_mode_off(self):
"""Turn away mode off."""
_LOGGER.error("Service Not Implemented yet")
def turn_aux_heat_on(self):
"""Turn auxillary heater on."""
_LOGGER.error("Service Not Implemented yet")
def turn_aux_heat_off(self):
"""Turn auxillary heater off."""
_LOGGER.error("Service Not Implemented yet")
| mit |
ademuk/django-oscar | src/oscar/apps/dashboard/catalogue/forms.py | 5 | 16311 | from django import forms
from django.core import exceptions
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from treebeard.forms import movenodeform_factory
from oscar.core.loading import get_class, get_model
from oscar.core.utils import slugify
from oscar.forms.widgets import ImageInput
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
Category = get_model('catalogue', 'Category')
StockRecord = get_model('partner', 'StockRecord')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
ProductSelect = get_class('dashboard.catalogue.widgets', 'ProductSelect')
CategoryForm = movenodeform_factory(
Category,
fields=['name', 'description', 'image'])
class ProductClassSelectForm(forms.Form):
"""
    Form which is used before creating a product to select its product class
"""
product_class = forms.ModelChoiceField(
label=_("Create a new product of type"),
empty_label=_("-- Choose type --"),
queryset=ProductClass.objects.all())
def __init__(self, *args, **kwargs):
"""
If there's only one product class, pre-select it
"""
super(ProductClassSelectForm, self).__init__(*args, **kwargs)
qs = self.fields['product_class'].queryset
if not kwargs.get('initial') and len(qs) == 1:
self.fields['product_class'].initial = qs[0]
class ProductSearchForm(forms.Form):
upc = forms.CharField(max_length=16, required=False, label=_('UPC'))
title = forms.CharField(
max_length=255, required=False, label=_('Product title'))
def clean(self):
cleaned_data = super(ProductSearchForm, self).clean()
cleaned_data['upc'] = cleaned_data['upc'].strip()
cleaned_data['title'] = cleaned_data['title'].strip()
return cleaned_data
class StockRecordForm(forms.ModelForm):
def __init__(self, product_class, user, *args, **kwargs):
# The user kwarg is not used by stock StockRecordForm. We pass it
# anyway in case one wishes to customise the partner queryset
self.user = user
super(StockRecordForm, self).__init__(*args, **kwargs)
# If not tracking stock, we hide the fields
if not product_class.track_stock:
del self.fields['num_in_stock']
del self.fields['low_stock_threshold']
else:
self.fields['price_excl_tax'].required = True
self.fields['num_in_stock'].required = True
class Meta:
model = StockRecord
fields = [
'partner', 'partner_sku',
'price_currency', 'price_excl_tax', 'price_retail', 'cost_price',
'num_in_stock', 'low_stock_threshold',
]
BaseStockRecordFormSet = inlineformset_factory(
Product, StockRecord, form=StockRecordForm, extra=1)
class StockRecordFormSet(BaseStockRecordFormSet):
def __init__(self, product_class, user, *args, **kwargs):
self.user = user
self.require_user_stockrecord = not user.is_staff
self.product_class = product_class
super(StockRecordFormSet, self).__init__(*args, **kwargs)
self.set_initial_data()
def set_initial_data(self):
"""
If user has only one partner associated, set the first
stock record's partner to it. Can't pre-select for staff users as
they're allowed to save a product without a stock record.
This is intentionally done after calling __init__ as passing initial
data to __init__ creates a form for each list item. So depending on
whether we can pre-select the partner or not, we'd end up with 1 or 2
forms for an unbound form.
"""
if self.require_user_stockrecord:
try:
user_partner = self.user.partners.get()
except (exceptions.ObjectDoesNotExist,
exceptions.MultipleObjectsReturned):
pass
else:
partner_field = self.forms[0].fields.get('partner', None)
if partner_field and partner_field.initial is None:
partner_field.initial = user_partner
def _construct_form(self, i, **kwargs):
kwargs['product_class'] = self.product_class
kwargs['user'] = self.user
return super(StockRecordFormSet, self)._construct_form(
i, **kwargs)
def clean(self):
"""
If the user isn't a staff user, this validation ensures that at least
one stock record's partner is associated with a users partners.
"""
if any(self.errors):
return
if self.require_user_stockrecord:
stockrecord_partners = set([form.cleaned_data.get('partner', None)
for form in self.forms])
user_partners = set(self.user.partners.all())
if not user_partners & stockrecord_partners:
raise exceptions.ValidationError(
_("At least one stock record must be set to a partner that"
" you're associated with."))
def _attr_text_field(attribute):
return forms.CharField(label=attribute.name,
required=attribute.required)
def _attr_textarea_field(attribute):
return forms.CharField(label=attribute.name,
widget=forms.Textarea(),
required=attribute.required)
def _attr_integer_field(attribute):
return forms.IntegerField(label=attribute.name,
required=attribute.required)
def _attr_boolean_field(attribute):
return forms.BooleanField(label=attribute.name,
required=attribute.required)
def _attr_float_field(attribute):
return forms.FloatField(label=attribute.name,
required=attribute.required)
def _attr_date_field(attribute):
return forms.DateField(label=attribute.name,
required=attribute.required,
widget=forms.widgets.DateInput)
def _attr_option_field(attribute):
return forms.ModelChoiceField(
label=attribute.name,
required=attribute.required,
queryset=attribute.option_group.options.all())
def _attr_multi_option_field(attribute):
return forms.ModelMultipleChoiceField(
label=attribute.name,
required=attribute.required,
queryset=attribute.option_group.options.all())
def _attr_entity_field(attribute):
    # Product entities don't have out-of-the-box support in the ProductForm.
# There is no ModelChoiceField for generic foreign keys, and there's no
# good default behaviour anyway; offering a choice of *all* model instances
# is hardly useful.
return None
def _attr_numeric_field(attribute):
return forms.FloatField(label=attribute.name,
required=attribute.required)
def _attr_file_field(attribute):
return forms.FileField(
label=attribute.name, required=attribute.required)
def _attr_image_field(attribute):
return forms.ImageField(
label=attribute.name, required=attribute.required)
class ProductForm(forms.ModelForm):
FIELD_FACTORIES = {
"text": _attr_text_field,
"richtext": _attr_textarea_field,
"integer": _attr_integer_field,
"boolean": _attr_boolean_field,
"float": _attr_float_field,
"date": _attr_date_field,
"option": _attr_option_field,
"multi_option": _attr_multi_option_field,
"entity": _attr_entity_field,
"numeric": _attr_numeric_field,
"file": _attr_file_field,
"image": _attr_image_field,
}
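    # Each factory above returns a django.forms field for one
    # ProductAttribute type; add_attribute_fields() below instantiates
    # them as 'attr_<code>' form fields.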
class Meta:
model = Product
fields = [
'title', 'upc', 'description', 'is_discountable', 'structure']
widgets = {
'structure': forms.HiddenInput()
}
def __init__(self, product_class, data=None, parent=None, *args, **kwargs):
self.set_initial(product_class, parent, kwargs)
super(ProductForm, self).__init__(data, *args, **kwargs)
if parent:
self.instance.parent = parent
# We need to set the correct product structures explicitly to pass
# attribute validation and child product validation. Note that
# those changes are not persisted.
self.instance.structure = Product.CHILD
self.instance.parent.structure = Product.PARENT
self.delete_non_child_fields()
else:
# Only set product class for non-child products
self.instance.product_class = product_class
self.add_attribute_fields(product_class, self.instance.is_parent)
if 'title' in self.fields:
self.fields['title'].widget = forms.TextInput(
attrs={'autocomplete': 'off'})
def set_initial(self, product_class, parent, kwargs):
"""
Set initial data for the form. Sets the correct product structure
and fetches initial values for the dynamically constructed attribute
fields.
"""
if 'initial' not in kwargs:
kwargs['initial'] = {}
self.set_initial_attribute_values(product_class, kwargs)
if parent:
kwargs['initial']['structure'] = Product.CHILD
def set_initial_attribute_values(self, product_class, kwargs):
"""
Update the kwargs['initial'] value to have the initial values based on
the product instance's attributes
"""
instance = kwargs.get('instance')
if instance is None:
return
for attribute in product_class.attributes.all():
try:
value = instance.attribute_values.get(
attribute=attribute).value
except exceptions.ObjectDoesNotExist:
pass
else:
kwargs['initial']['attr_%s' % attribute.code] = value
def add_attribute_fields(self, product_class, is_parent=False):
"""
For each attribute specified by the product class, this method
dynamically adds form fields to the product form.
"""
for attribute in product_class.attributes.all():
field = self.get_attribute_field(attribute)
if field:
self.fields['attr_%s' % attribute.code] = field
# Attributes are not required for a parent product
if is_parent:
self.fields['attr_%s' % attribute.code].required = False
def get_attribute_field(self, attribute):
"""
Gets the correct form field for a given attribute type.
"""
return self.FIELD_FACTORIES[attribute.type](attribute)
def delete_non_child_fields(self):
"""
Deletes any fields not needed for child products. Override this if
you want to e.g. keep the description field.
"""
for field_name in ['description', 'is_discountable']:
if field_name in self.fields:
del self.fields[field_name]
def _post_clean(self):
"""
Set attributes before ModelForm calls the product's clean method
(which it does in _post_clean), which in turn validates attributes.
"""
product_class = self.instance.get_product_class()
for attribute in product_class.attributes.all():
field_name = 'attr_%s' % attribute.code
# An empty text field won't show up in cleaned_data.
if field_name in self.cleaned_data:
value = self.cleaned_data[field_name]
setattr(self.instance.attr, attribute.code, value)
super(ProductForm, self)._post_clean()
class StockAlertSearchForm(forms.Form):
status = forms.CharField(label=_('Status'))
class ProductCategoryForm(forms.ModelForm):
class Meta:
model = ProductCategory
fields = ('category', )
BaseProductCategoryFormSet = inlineformset_factory(
Product, ProductCategory, form=ProductCategoryForm, extra=1,
can_delete=True)
class ProductCategoryFormSet(BaseProductCategoryFormSet):
def __init__(self, product_class, user, *args, **kwargs):
# This function just exists to drop the extra arguments
super(ProductCategoryFormSet, self).__init__(*args, **kwargs)
def clean(self):
if not self.instance.is_child and self.get_num_categories() == 0:
raise forms.ValidationError(
_("Stand-alone and parent products "
"must have at least one category"))
if self.instance.is_child and self.get_num_categories() > 0:
raise forms.ValidationError(
_("A child product should not have categories"))
def get_num_categories(self):
num_categories = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if (hasattr(form, 'cleaned_data')
and form.cleaned_data.get('category', None)
and not form.cleaned_data.get('DELETE', False)):
num_categories += 1
return num_categories
class ProductImageForm(forms.ModelForm):
class Meta:
model = ProductImage
fields = ['product', 'original', 'caption']
# use ImageInput widget to create HTML displaying the
# actual uploaded image and providing the upload dialog
# when clicking on the actual image.
widgets = {
'original': ImageInput(),
}
def save(self, *args, **kwargs):
# We infer the display order of the image based on the order of the
# image fields within the formset.
kwargs['commit'] = False
obj = super(ProductImageForm, self).save(*args, **kwargs)
obj.display_order = self.get_display_order()
obj.save()
return obj
def get_display_order(self):
return self.prefix.split('-').pop()
BaseProductImageFormSet = inlineformset_factory(
Product, ProductImage, form=ProductImageForm, extra=2)
class ProductImageFormSet(BaseProductImageFormSet):
def __init__(self, product_class, user, *args, **kwargs):
super(ProductImageFormSet, self).__init__(*args, **kwargs)
class ProductRecommendationForm(forms.ModelForm):
class Meta:
model = ProductRecommendation
fields = ['primary', 'recommendation', 'ranking']
widgets = {
'recommendation': ProductSelect,
}
BaseProductRecommendationFormSet = inlineformset_factory(
Product, ProductRecommendation, form=ProductRecommendationForm,
extra=5, fk_name="primary")
class ProductRecommendationFormSet(BaseProductRecommendationFormSet):
def __init__(self, product_class, user, *args, **kwargs):
super(ProductRecommendationFormSet, self).__init__(*args, **kwargs)
class ProductClassForm(forms.ModelForm):
class Meta:
model = ProductClass
fields = ['name', 'requires_shipping', 'track_stock', 'options']
class ProductAttributesForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProductAttributesForm, self).__init__(*args, **kwargs)
        # The code is optional: we allow the form to be submitted with a
        # blank code and generate one from the name in clean_code() below.
self.fields["code"].required = False
self.fields["option_group"].help_text = _("Select an option group")
def clean_code(self):
code = self.cleaned_data.get("code")
title = self.cleaned_data.get("name")
if not code and title:
code = slugify(title)
return code
class Meta:
model = ProductAttribute
fields = ["name", "code", "type", "option_group", "required"]
ProductAttributesFormSet = inlineformset_factory(ProductClass,
ProductAttribute,
form=ProductAttributesForm,
extra=3)
| bsd-3-clause |
Pagten/picamserver | moveimages.py | 1 | 7472 | #!/usr/bin/python3
import os
import shutil
import logging
import re
import hashlib
import sys
import functools
import subprocess
from logging.handlers import RotatingFileHandler
from functools import partial
LOG_FILE = 'moveimages.log'
SOURCE_FOLDERS = ['/mnt/usb/timelapse/', '/mnt/sdcard/timelapse/']
TARGET_FOLDERS = ['/mnt/storage0/timelapse/']
MOUNT_POINTS = ['/mnt/storage0/']
MD5SUM_REGEX = re.compile(r"_md5-(?P<md5sum>[0-9A-Fa-f]{32})[_\.]")
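# Matches filenames that embed their checksum, e.g.
# 'frame_md5-0123456789abcdef0123456789abcdef.jpg'.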
def is_subdir_of(path, directory):
path = os.path.realpath(path)
directory = os.path.realpath(directory)
relative = os.path.relpath(path, directory)
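    # os.path.relpath only starts with '..' when path lies outside directory.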
return not relative.startswith(os.pardir + os.sep)
def get_md5sum(path, try_from_basename=False):
if try_from_basename:
match = MD5SUM_REGEX.search(os.path.basename(path))
if match:
md5sum = match.group('md5sum')
if md5sum:
logging.debug('Found MD5 sum in filename %s: %s', path, md5sum)
return md5sum
d = hashlib.md5()
with open(path, mode='rb') as f:
for buf in iter(partial(f.read, 4096), b''):
d.update(buf)
md5sum = d.hexdigest()
logging.debug("Calculated MD5 sum %s for '%s'", md5sum, path)
return md5sum
def mount(path):
subprocess.call(["/bin/mount", path], timeout=10)
class InvalidPathException(Exception):
    def __init__(self, path):
self.path = path
def __str__(self):
return repr(self.path)
class FileMover():
def __init__(self, src_folders, dst_folders, mountpoints):
self.logger = logging.getLogger(type(self).__name__)
self.src_folders = src_folders
self.dst_folders = dst_folders
self.mountpoints = mountpoints
def is_src_path(self, path):
return any([is_subdir_of(path, x) for x in self.src_folders])
def is_dst_path(self, path):
return any([is_subdir_of(path, x) for x in self.dst_folders])
def _check_src_path(self, src_path):
if not self.is_src_path(src_path):
logging.error("Path '%s' is not a file or subdirectory of any configured source folder", src_path)
raise InvalidPathException(src_path)
return src_path
def _check_dst_path(self, dst_path):
if not self.is_dst_path(dst_path):
logging.error("Path '%s' is not a subdirectory of any configured destination folder", dst_path)
raise InvalidPathException(dst_path)
return dst_path
def run(self):
# First try to mount all destination folders
for path in self.mountpoints:
self.logger.info("Mounting '%s'", path)
try:
mount(path)
except Exception as e:
self.logger.error("Error mounting folder '%s': %s", path, str(e))
# Start moving files
for src_path in self.src_folders:
self.logger.info("Handling source folder '%s'", src_path)
for name in os.listdir(src_path):
path = os.path.join(src_path, name)
if os.path.isdir(path):
self.move_dir(src_path, name, self.dst_folders)
elif os.path.isfile(path):
self.move_file(src_path, name, self.dst_folders)
def move_dir(self, base_src_path, rel_src_path, base_dst_paths):
src_path = self._check_src_path(os.path.join(base_src_path, rel_src_path))
dst_paths = list(map(lambda base_dst_path: self._check_dst_path(os.path.join(base_dst_path, rel_src_path)), base_dst_paths))
self.logger.info("Copying dir '%s'", src_path)
dst_reachable = False
for dst_path in dst_paths:
try:
os.mkdir(dst_path)
dst_reachable = True
except FileExistsError as e:
dst_reachable = True
except Exception as e:
self.logger.warning("Unable to create destination dir '%s': %s", dst_path, str(e))
if not dst_reachable:
self.logger.error("Aborting because no destination dir could be created")
return
for name in os.listdir(src_path):
path = os.path.join(src_path, name)
if os.path.isdir(path):
self.move_dir(src_path, name, dst_paths)
elif os.path.isfile(path):
self.move_file(src_path, name, dst_paths)
try:
os.rmdir(src_path)
self.logger.info("Removed empty source dir '%s'", src_path)
except OSError as e:
self.logger.warning("Not removing source dir '%s' because it is not empty: %s", src_path, str(e))
def move_file(self, src_dir_path, filename, dst_dir_paths):
# Check input
if len(dst_dir_paths) == 0:
raise ValueError("List of destination dirs cannot be empty")
src_path = self._check_src_path(os.path.join(src_dir_path, filename))
for dst_dir_path in dst_dir_paths:
self._check_dst_path(os.path.join(dst_dir_path, filename))
# Calculate or retrieve MD5 sum
try:
src_md5sum = get_md5sum(src_path, try_from_basename=True)
except Exception as e:
self.logger.error("Error retrieving MD5 sum of '%s': %s", src_path, str(e))
return
filesize = os.stat(src_path).st_size
all_ok = True
for dst_dir_path in dst_dir_paths:
dst_path = os.path.join(dst_dir_path, filename)
if os.path.isfile(dst_path):
try:
dst_md5sum = get_md5sum(dst_path, try_from_basename=False)
except Exception as e:
self.logger.error("Unable to calculate MD5 sum of existing file '%s': %s", dst_path, str(e))
dst_md5sum = None
if dst_md5sum is not None and dst_md5sum == src_md5sum:
self.logger.info("Not copying file '%s' to '%s' because destination file already exists and MD5 sum matches source", src_path, dst_path)
continue
else:
self.logger.warning("Destination file '%s' already exists, but MD5 sum does not match source. Will overwrite...", dst_path)
# Perform copy
self.logger.info("Copying file '%s' to '%s' [filesize: %d KB, md5sum: %s]", src_path, dst_path, filesize // 1024, src_md5sum)
try:
shutil.copy2(src_path, dst_path)
self.logger.debug("Copied file '%s' to '%s'", src_path, dst_path)
except Exception as e:
all_ok = False
self.logger.error("Unable to copy file '%s' to '%s': %s", src_path, dst_path, str(e))
continue
# Calculate and compare MD5 sum
try:
dst_md5sum = get_md5sum(dst_path, try_from_basename=False)
except Exception as e:
all_ok = False
self.logger.error("Error calculating MD5 sum of '%s': %s", dst_path, str(e))
continue
if src_md5sum != dst_md5sum:
all_ok = False
self.logger.error("MD5 sum of '%s' does NOT match that '%s'. Found %s instead of %s", dst_path, src_path, dst_md5sum, src_md5sum)
# Remove if all MD5 sums matched
if all_ok:
self.logger.info("Removing '%s'", src_path)
os.remove(src_path)
else:
self.logger.warning("Not removing '%s' because not all destination MD5 sums matched", src_path)
def setup_logging():
root_log = logging.getLogger('')
root_log.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
root_log.addHandler(stream_handler)
file_handler = RotatingFileHandler(LOG_FILE, maxBytes=(1024*1024*10), backupCount=7)
file_handler.setFormatter(formatter)
root_log.addHandler(file_handler)
def main():
setup_logging()
fm = FileMover(SOURCE_FOLDERS, TARGET_FOLDERS, MOUNT_POINTS)
fm.run()
if __name__ == "__main__":
main()
| gpl-3.0 |
apechimp/servo | tests/wpt/web-platform-tests/tools/manifest/manifest.py | 10 | 12521 | import json
import os
from collections import defaultdict
from item import item_types, ManualTest, WebdriverSpecTest, Stub, RefTest, TestharnessTest
from log import get_logger
from sourcefile import SourceFile
CURRENT_VERSION = 2
class ManifestError(Exception):
pass
class ManifestVersionMismatch(ManifestError):
pass
class Manifest(object):
def __init__(self, git_rev=None, url_base="/"):
# Dict of item_type: {path: set(manifest_items)}
self._data = dict((item_type, defaultdict(set))
for item_type in item_types)
self.rev = git_rev
self.url_base = url_base
self.local_changes = LocalChanges(self)
# reftest nodes arranged as {path: set(manifest_items)}
self.reftest_nodes = defaultdict(set)
self.reftest_nodes_by_url = {}
def _included_items(self, include_types=None):
if include_types is None:
include_types = item_types
for item_type in include_types:
paths = self._data[item_type].copy()
for local_types, local_paths in self.local_changes.itertypes(item_type):
for path, items in local_paths.iteritems():
paths[path] = items
for path in self.local_changes.iterdeleted():
if path in paths:
del paths[path]
yield item_type, paths
def contains_path(self, path):
return any(path in paths for _, paths in self._included_items())
def add(self, item):
if item is None:
return
is_reference = False
if isinstance(item, RefTest):
self.reftest_nodes[item.path].add(item)
self.reftest_nodes_by_url[item.url] = item
is_reference = item.is_reference
if not is_reference:
self._add(item)
item.manifest = self
def _add(self, item):
self._data[item.item_type][item.path].add(item)
def extend(self, items):
for item in items:
self.add(item)
def remove_path(self, path):
for item_type in item_types:
if path in self._data[item_type]:
del self._data[item_type][path]
def itertypes(self, *types):
if not types:
types = None
for item_type, items in self._included_items(types):
for item in sorted(items.items()):
yield item
def __iter__(self):
for item in self.itertypes():
yield item
def __getitem__(self, path):
for _, paths in self._included_items():
if path in paths:
return paths[path]
raise KeyError
def get_reference(self, url):
if url in self.local_changes.reftest_nodes_by_url:
return self.local_changes.reftest_nodes_by_url[url]
if url in self.reftest_nodes_by_url:
return self.reftest_nodes_by_url[url]
return None
def _committed_with_path(self, rel_path):
rv = set()
for paths_items in self._data.itervalues():
rv |= paths_items.get(rel_path, set())
if rel_path in self.reftest_nodes:
rv |= self.reftest_nodes[rel_path]
return rv
def _committed_paths(self):
rv = set()
for paths_items in self._data.itervalues():
rv |= set(paths_items.keys())
return rv
def update(self,
tests_root,
url_base,
new_rev,
committed_changes=None,
local_changes=None,
remove_missing_local=False):
if local_changes is None:
local_changes = {}
if committed_changes is not None:
for rel_path, status in committed_changes:
self.remove_path(rel_path)
if status == "modified":
use_committed = rel_path in local_changes
source_file = SourceFile(tests_root,
rel_path,
url_base,
use_committed=use_committed)
self.extend(source_file.manifest_items())
self.local_changes = LocalChanges(self)
local_paths = set()
for rel_path, status in local_changes.iteritems():
local_paths.add(rel_path)
if status == "modified":
existing_items = self._committed_with_path(rel_path)
source_file = SourceFile(tests_root,
rel_path,
url_base,
use_committed=False)
local_items = set(source_file.manifest_items())
updated_items = local_items - existing_items
self.local_changes.extend(updated_items)
else:
self.local_changes.add_deleted(rel_path)
if remove_missing_local:
for path in self._committed_paths() - local_paths:
self.local_changes.add_deleted(path)
self.update_reftests()
if new_rev is not None:
self.rev = new_rev
self.url_base = url_base
def update_reftests(self):
reftest_nodes = self.reftest_nodes.copy()
for path, items in self.local_changes.reftest_nodes.iteritems():
reftest_nodes[path] |= items
#TODO: remove locally deleted files
tests = set()
for items in reftest_nodes.values():
tests |= set(item for item in items if not item.is_reference)
has_inbound = set()
for path, items in reftest_nodes.iteritems():
for item in items:
for ref_url, ref_type in item.references:
has_inbound.add(ref_url)
if self.local_changes.reftest_nodes:
target = self.local_changes
else:
target = self
#TODO: Warn if there exist unreachable reftest nodes
for path, items in reftest_nodes.iteritems():
for item in items:
if item.url in has_inbound:
continue
target._data["reftest"][path].add(item)
def to_json(self):
out_items = {
item_type: sorted(
test.to_json()
for _, tests in items.iteritems()
for test in tests
)
for item_type, items in self._data.iteritems()
}
reftest_nodes = {key:[v.to_json() for v in value]
for key, value in self.reftest_nodes.iteritems()}
rv = {"url_base": self.url_base,
"rev": self.rev,
"local_changes": self.local_changes.to_json(),
"items": out_items,
"reftest_nodes": reftest_nodes,
"version": CURRENT_VERSION}
return rv
@classmethod
def from_json(cls, tests_root, obj):
version = obj.get("version")
if version != CURRENT_VERSION:
raise ManifestVersionMismatch
self = cls(git_rev=obj["rev"],
url_base=obj.get("url_base", "/"))
if not hasattr(obj, "iteritems"):
raise ManifestError
item_classes = {"testharness": TestharnessTest,
"reftest": RefTest,
"manual": ManualTest,
"stub": Stub,
"wdspec": WebdriverSpecTest}
source_files = {}
for k, values in obj["items"].iteritems():
if k not in item_types:
raise ManifestError
for v in values:
manifest_item = item_classes[k].from_json(self, tests_root, v,
source_files=source_files)
self._add(manifest_item)
for path, values in obj["reftest_nodes"].iteritems():
for v in values:
item = RefTest.from_json(self, tests_root, v,
source_files=source_files)
self.reftest_nodes[path].add(item)
self.reftest_nodes_by_url[v["url"]] = item
self.local_changes = LocalChanges.from_json(self,
tests_root,
obj["local_changes"],
source_files=source_files)
return self
class LocalChanges(object):
def __init__(self, manifest):
self.manifest = manifest
self._data = dict((item_type, defaultdict(set)) for item_type in item_types)
self._deleted = set()
self.reftest_nodes = defaultdict(set)
self.reftest_nodes_by_url = {}
def add(self, item):
if item is None:
return
is_reference = False
if isinstance(item, RefTest):
self.reftest_nodes[item.path].add(item)
self.reftest_nodes_by_url[item.url] = item
is_reference = item.is_reference
if not is_reference:
self._add(item)
item.manifest = self.manifest
def _add(self, item):
self._data[item.item_type][item.path].add(item)
def extend(self, items):
for item in items:
self.add(item)
def add_deleted(self, path):
self._deleted.add(path)
def is_deleted(self, path):
return path in self._deleted
def itertypes(self, *types):
for item_type in types:
yield item_type, self._data[item_type]
def iterdeleted(self):
for item in self._deleted:
yield item
def __getitem__(self, item_type):
return self._data[item_type]
def to_json(self):
reftest_nodes = {key:[v.to_json() for v in value]
for key, value in self.reftest_nodes.iteritems()}
rv = {"items": defaultdict(dict),
"reftest_nodes": reftest_nodes,
"deleted": []}
rv["deleted"].extend(self._deleted)
for test_type, paths in self._data.iteritems():
for path, tests in paths.iteritems():
rv["items"][test_type][path] = [test.to_json() for test in tests]
return rv
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
self = cls(manifest)
if not hasattr(obj, "iteritems"):
raise ManifestError
item_classes = {"testharness": TestharnessTest,
"reftest": RefTest,
"manual": ManualTest,
"stub": Stub,
"wdspec": WebdriverSpecTest}
for test_type, paths in obj["items"].iteritems():
for path, tests in paths.iteritems():
for test in tests:
manifest_item = item_classes[test_type].from_json(manifest,
tests_root,
test,
source_files=source_files)
self.add(manifest_item)
for path, values in obj["reftest_nodes"].iteritems():
for v in values:
item = RefTest.from_json(self.manifest, tests_root, v,
source_files=source_files)
self.reftest_nodes[path].add(item)
self.reftest_nodes_by_url[item.url] = item
for item in obj["deleted"]:
self.add_deleted(item)
return self
def load(tests_root, manifest):
logger = get_logger()
# "manifest" is a path or file-like object.
if isinstance(manifest, basestring):
if os.path.exists(manifest):
logger.debug("Opening manifest at %s" % manifest)
else:
logger.debug("Creating new manifest at %s" % manifest)
try:
with open(manifest) as f:
rv = Manifest.from_json(tests_root, json.load(f))
except IOError:
rv = Manifest(None)
return rv
return Manifest.from_json(tests_root, json.load(manifest))
def write(manifest, manifest_path):
with open(manifest_path, "w") as f:
json.dump(manifest.to_json(), f, sort_keys=True, indent=2, separators=(',', ': '))
f.write("\n")
| mpl-2.0 |
ddico/odoo | addons/payment/models/payment_acquirer.py | 1 | 59190 | # coding: utf-8
from collections import defaultdict
import hashlib
import hmac
import logging
from datetime import datetime
from dateutil import relativedelta
import pprint
from odoo import api, exceptions, fields, models, _, SUPERUSER_ID
from odoo.tools import consteq, float_round, image_process, ustr
from odoo.exceptions import ValidationError
from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from odoo.tools.misc import formatLang
from odoo.http import request
_logger = logging.getLogger(__name__)
def _partner_format_address(address1=False, address2=False):
return ' '.join((address1 or '', address2 or '')).strip()
def _partner_split_name(partner_name):
return [' '.join(partner_name.split()[:-1]), ' '.join(partner_name.split()[-1:])]
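# Illustrative split (everything before the last token is the first name):
#   _partner_split_name('John Ronald Doe')  # -> ['John Ronald', 'Doe']
#   _partner_split_name('Cher')             # -> ['', 'Cher']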
def create_missing_journal_for_acquirers(cr, registry):
env = api.Environment(cr, SUPERUSER_ID, {})
env['payment.acquirer']._create_missing_journal_for_acquirers()
class PaymentAcquirer(models.Model):
""" Acquirer Model. Each specific acquirer can extend the model by adding
its own fields, using the acquirer_name as a prefix for the new fields.
Using the required_if_provider='<name>' attribute on fields it is possible
to have required fields that depend on a specific acquirer.
Each acquirer has a link to an ir.ui.view record that is a template of
a button used to display the payment form. See examples in ``payment_ingenico``
and ``payment_paypal`` modules.
Methods that should be added in an acquirer-specific implementation:
- ``<name>_form_generate_values(self, reference, amount, currency,
partner_id=False, partner_values=None, tx_custom_values=None)``:
method that generates the values used to render the form button template.
- ``<name>_get_form_action_url(self):``: method that returns the url of
the button form. It is used for example in ecommerce application if you
want to post some data to the acquirer.
- ``<name>_compute_fees(self, amount, currency_id, country_id)``: computes
the fees of the acquirer, using generic fields defined on the acquirer
model (see fields definition).
Each acquirer should also define controllers to handle communication between
OpenERP and the acquirer. It generally consists of return URLs given to the
button form, which the acquirer uses to send the customer back after the
transaction, with transaction details given as a POST request.
"""
_name = 'payment.acquirer'
_description = 'Payment Acquirer'
_order = 'module_state, state, sequence, name'
def _valid_field_parameter(self, field, name):
return name == 'required_if_provider' or super()._valid_field_parameter(field, name)
def _get_default_view_template_id(self):
return self.env.ref('payment.default_acquirer_button', raise_if_not_found=False)
name = fields.Char('Name', required=True, translate=True)
color = fields.Integer('Color', compute='_compute_color', store=True)
display_as = fields.Char('Displayed as', translate=True, help="How the acquirer is displayed to the customers.")
description = fields.Html('Description')
sequence = fields.Integer('Sequence', default=10, help="Determine the display order")
provider = fields.Selection(
selection=[('manual', 'Custom Payment Form')], string='Provider',
default='manual', required=True)
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env.company.id, required=True)
view_template_id = fields.Many2one(
'ir.ui.view', 'Form Button Template',
default=_get_default_view_template_id,
help="This template renders the acquirer button with all necessary values.\n"
"It is rendered with qWeb with the following evaluation context:\n"
"tx_url: transaction URL to post the form\n"
"acquirer: payment.acquirer browse record\n"
"user: current user browse record\n"
"reference: the transaction reference number\n"
"currency: the transaction currency browse record\n"
"amount: the transaction amount, a float\n"
"partner: the buyer partner browse record, not necessarily set\n"
"partner_values: specific values about the buyer, for example coming from a shipping form\n"
"tx_values: transaction values\n"
"context: the current context dictionary")
registration_view_template_id = fields.Many2one(
'ir.ui.view', 'S2S Form Template', domain=[('type', '=', 'qweb')],
help="Template for method registration")
state = fields.Selection([
('disabled', 'Disabled'),
('enabled', 'Enabled'),
('test', 'Test Mode')], required=True, default='disabled', copy=False,
help="""In test mode, a fake payment is processed through a test
payment interface. This mode is advised when setting up the
acquirer. Watch out, test and production modes require
different credentials.""")
capture_manually = fields.Boolean(string="Capture Amount Manually",
help="Capture the amount from Odoo, when the delivery is completed.")
journal_id = fields.Many2one(
'account.journal', 'Payment Journal', domain="[('type', 'in', ['bank', 'cash']), ('company_id', '=', company_id)]",
help="""Journal where the successful transactions will be posted""")
check_validity = fields.Boolean(string="Verify Card Validity",
help="""Trigger a transaction of 1 currency unit and its refund to check the validity of new credit cards entered in the customer portal.
Without this check, the validity will be verified at the very first transaction.""")
country_ids = fields.Many2many(
'res.country', 'payment_country_rel',
'payment_id', 'country_id', 'Countries',
help="This payment gateway is available for selected countries. If none is selected it is available for all countries.")
pre_msg = fields.Html(
'Help Message', translate=True,
help='Message displayed to explain and help the payment process.')
auth_msg = fields.Html(
'Authorize Message', translate=True,
default=lambda s: _('Your payment has been authorized.'),
help='Message displayed if payment is authorized.')
pending_msg = fields.Html(
'Pending Message', translate=True,
default=lambda s: _('Your payment has been successfully processed but is waiting for approval.'),
help='Message displayed, if order is in pending state after having done the payment process.')
done_msg = fields.Html(
'Done Message', translate=True,
default=lambda s: _('Your payment has been successfully processed. Thank you!'),
help='Message displayed, if order is done successfully after having done the payment process.')
cancel_msg = fields.Html(
'Cancel Message', translate=True,
default=lambda s: _('Your payment has been cancelled.'),
help='Message displayed, if order is cancel during the payment process.')
save_token = fields.Selection([
('none', 'Never'),
('ask', 'Let the customer decide'),
('always', 'Always')],
string='Save Cards', default='none',
help="This option allows customers to save their credit card as a payment token and to reuse it for a later purchase. "
"If you manage subscriptions (recurring invoicing), you need it to automatically charge the customer when you "
"issue an invoice.")
token_implemented = fields.Boolean('Saving Card Data supported', compute='_compute_feature_support', search='_search_is_tokenized')
authorize_implemented = fields.Boolean('Authorize Mechanism Supported', compute='_compute_feature_support')
fees_implemented = fields.Boolean('Fees Computation Supported', compute='_compute_feature_support')
fees_active = fields.Boolean('Add Extra Fees')
fees_dom_fixed = fields.Float('Fixed domestic fees')
fees_dom_var = fields.Float('Variable domestic fees (in percents)')
fees_int_fixed = fields.Float('Fixed international fees')
fees_int_var = fields.Float('Variable international fees (in percents)')
qr_code = fields.Boolean('Enable QR Codes', help="Enable the use of QR-codes for payments made on this provider.")
# TDE FIXME: remove that brol
module_id = fields.Many2one('ir.module.module', string='Corresponding Module')
module_state = fields.Selection(string='Installation State', related='module_id.state', store=True)
module_to_buy = fields.Boolean(string='Odoo Enterprise Module', related='module_id.to_buy', readonly=True, store=False)
image_128 = fields.Image("Image", max_width=128, max_height=128)
payment_icon_ids = fields.Many2many('payment.icon', string='Supported Payment Icons')
payment_flow = fields.Selection(selection=[('form', 'Redirection to the acquirer website'),
('s2s','Payment from Odoo')],
default='form', required=True, string='Payment Flow',
help="""Note: Subscriptions does not take this field in account, it uses server to server by default.""")
inbound_payment_method_ids = fields.Many2many('account.payment.method', related='journal_id.inbound_payment_method_ids', readonly=False)
@api.onchange('payment_flow')
def _onchange_payment_flow(self):
electronic = self.env.ref('payment.account_payment_method_electronic_in')
if self.token_implemented and self.payment_flow == 's2s':
if electronic not in self.inbound_payment_method_ids:
self.inbound_payment_method_ids = [(4, electronic.id)]
elif electronic in self.inbound_payment_method_ids:
self.inbound_payment_method_ids = [(2, electronic.id)]
@api.onchange('state')
def onchange_state(self):
"""Disable dashboard display for test acquirer journal."""
self.journal_id.update({'show_on_dashboard': self.state == 'enabled'})
def _search_is_tokenized(self, operator, value):
tokenized = self._get_feature_support()['tokenize']
if (operator, value) in [('=', True), ('!=', False)]:
return [('provider', 'in', tokenized)]
return [('provider', 'not in', tokenized)]
@api.depends('provider')
def _compute_feature_support(self):
feature_support = self._get_feature_support()
for acquirer in self:
acquirer.fees_implemented = acquirer.provider in feature_support['fees']
acquirer.authorize_implemented = acquirer.provider in feature_support['authorize']
acquirer.token_implemented = acquirer.provider in feature_support['tokenize']
@api.depends('state', 'module_state')
def _compute_color(self):
for acquirer in self:
if acquirer.module_id and not acquirer.module_state == 'installed':
acquirer.color = 4 # blue
elif acquirer.state == 'disabled':
acquirer.color = 3 # yellow
elif acquirer.state == 'test':
acquirer.color = 2 # orange
elif acquirer.state == 'enabled':
acquirer.color = 7 # green
def _check_required_if_provider(self):
""" If the field has 'required_if_provider="<provider>"' attribute, then it
required if record.provider is <provider>. """
field_names = []
enabled_acquirers = self.filtered(lambda acq: acq.state in ['enabled', 'test'])
for k, f in self._fields.items():
provider = getattr(f, 'required_if_provider', None)
if provider and any(
acquirer.provider == provider and not acquirer[k]
for acquirer in enabled_acquirers
):
ir_field = self.env['ir.model.fields']._get(self._name, k)
field_names.append(ir_field.field_description)
if field_names:
raise ValidationError(_("Required fields not filled: %s") % ", ".join(field_names))
def get_base_url(self):
self.ensure_one()
# Priority is always given to the url_root from the request
url = ''
if request:
url = request.httprequest.url_root
if not url and 'website_id' in self and self.website_id:
url = self.website_id._get_http_domain()
return url or self.env['ir.config_parameter'].sudo().get_param('web.base.url')
def _get_feature_support(self):
"""Get advanced feature support by provider.
Each provider should add its technical name in the corresponding
key for the following features:
* fees: support payment fees computations
* authorize: support authorizing payment (separates
authorization and capture)
* tokenize: support saving payment data in a payment.token
object
"""
return dict(authorize=[], tokenize=[], fees=[])
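# Hedged example of a provider module advertising its features by overriding
# this hook (continuing the hypothetical AcquirerExample sketch above):
#
#   def _get_feature_support(self):
#       res = super(AcquirerExample, self)._get_feature_support()
#       res['tokenize'].append('example')
#       return res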
def _prepare_account_journal_vals(self):
'''Prepare the values to create the acquirer's journal.
:return: a dictionary to create a account.journal record.
'''
self.ensure_one()
account_vals = self.company_id.chart_template_id._prepare_transfer_account_for_direct_creation(self.name, self.company_id)
account = self.env['account.account'].create(account_vals)
inbound_payment_method_ids = []
if self.token_implemented and self.payment_flow == 's2s':
inbound_payment_method_ids.append((4, self.env.ref('payment.account_payment_method_electronic_in').id))
return {
'name': self.name,
'code': self.name.upper(),
'sequence': 999,
'type': 'bank',
'company_id': self.company_id.id,
'default_debit_account_id': account.id,
'default_credit_account_id': account.id,
# Show the journal on dashboard if the acquirer is published on the website.
'show_on_dashboard': self.state == 'enabled',
# Don't show payment methods in the backend.
'inbound_payment_method_ids': inbound_payment_method_ids,
'outbound_payment_method_ids': [],
}
@api.model
def _create_missing_journal_for_acquirers(self, company=None):
'''Create the journal for active acquirers.
We want one journal per acquirer. However, we can't create them during the 'create' of the payment.acquirer
because every acquirer is defined in the 'payment' module but becomes active only when its own module is installed
(e.g. payment_paypal for Paypal). We can't do that in such modules because we have no guarantee the chart template
is already installed.
'''
# Search for installed acquirers modules.
# If this method is triggered by a post_init_hook, the module is 'to install'.
# If the trigger comes from the chart template wizard, the modules are already installed.
acquirer_modules = self.env['ir.module.module'].search(
[('name', 'like', 'payment_%'), ('state', 'in', ('to install', 'installed'))])
acquirer_names = [a.name.split('_', 1)[1] for a in acquirer_modules]
# Search for acquirers having no journal
company = company or self.env.company
acquirers = self.env['payment.acquirer'].search(
[('provider', 'in', acquirer_names), ('journal_id', '=', False), ('company_id', '=', company.id)])
journals = self.env['account.journal']
for acquirer in acquirers.filtered(lambda l: not l.journal_id and l.company_id.chart_template_id):
acquirer.journal_id = self.env['account.journal'].create(acquirer._prepare_account_journal_vals())
journals += acquirer.journal_id
return journals
@api.model
def create(self, vals):
record = super(PaymentAcquirer, self).create(vals)
record._check_required_if_provider()
return record
def write(self, vals):
result = super(PaymentAcquirer, self).write(vals)
self._check_required_if_provider()
return result
def get_acquirer_extra_fees(self, amount, currency_id, country_id):
extra_fees = {
'currency_id': currency_id
}
acquirers = self.filtered(lambda x: x.fees_active)
for acq in acquirers:
custom_method_name = '%s_compute_fees' % acq.provider
if hasattr(acq, custom_method_name):
fees = getattr(acq, custom_method_name)(amount, currency_id, country_id)
extra_fees[acq] = fees
return extra_fees
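# Hedged illustration of the '%s_compute_fees' dispatch above; the method
# name and formula are hypothetical, using the generic fee fields defined
# on the acquirer:
#
#   def example_compute_fees(self, amount, currency_id, country_id):
#       # fixed part plus a percentage of the amount (domestic rates)
#       return float_round(self.fees_dom_fixed + amount * self.fees_dom_var / 100.0, 2)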
def get_form_action_url(self):
""" Returns the form action URL, for form-based acquirer implementations. """
if hasattr(self, '%s_get_form_action_url' % self.provider):
return getattr(self, '%s_get_form_action_url' % self.provider)()
return False
def _get_available_payment_input(self, partner=None, company=None):
""" Generic (model) method that fetches available payment mechanisms
to use in all portal / eshop pages that want to use the payment form.
It contains
* acquirers: record set of both form and s2s acquirers;
* pms: record set of stored credit card data (aka payment.token)
connected to a given partner to allow customers to reuse them """
if not company:
company = self.env.company
if not partner:
partner = self.env.user.partner_id
active_acquirers = self.search([('state', 'in', ['enabled', 'test']), ('company_id', '=', company.id)])
acquirers = active_acquirers.filtered(lambda acq: (acq.payment_flow == 'form' and acq.view_template_id) or
(acq.payment_flow == 's2s' and acq.registration_view_template_id))
return {
'acquirers': acquirers,
'pms': self.env['payment.token'].search([
('partner_id', '=', partner.id),
('acquirer_id', 'in', acquirers.ids)]),
}
def render(self, reference, amount, currency_id, partner_id=False, values=None):
""" Renders the form template of the given acquirer as a qWeb template.
:param string reference: the transaction reference
:param float amount: the amount the buyer has to pay
:param currency_id: currency id
:param dict partner_id: optional partner_id to fill values
:param dict values: a dictionary of values for the transaction that is
given to the acquirer-specific method generating the form values
All templates will receive:
- acquirer: the payment.acquirer browse record
- user: the current user browse record
- currency_id: id of the transaction currency
- amount: amount of the transaction
- reference: reference of the transaction
- partner_*: partner-related values
- partner: optional partner browse record
- 'feedback_url': feedback URL, controller that manages the answer of the acquirer (without base url) -> FIXME
- 'return_url': URL for coming back after payment validation (without base url) -> FIXME
- 'cancel_url': URL if the client cancels the payment -> FIXME
- 'error_url': URL if there is an issue with the payment -> FIXME
- context: Odoo context
"""
if values is None:
values = {}
if not self.view_template_id:
return None
values.setdefault('return_url', '/payment/process')
# reference and amount
values.setdefault('reference', reference)
amount = float_round(amount, 2)
values.setdefault('amount', amount)
# currency id
currency_id = values.setdefault('currency_id', currency_id)
if currency_id:
currency = self.env['res.currency'].browse(currency_id)
else:
currency = self.env.company.currency_id
values['currency'] = currency
# Fill partner_* using values['partner_id'] or partner_id argument
partner_id = values.get('partner_id', partner_id)
billing_partner_id = values.get('billing_partner_id', partner_id)
if partner_id:
partner = self.env['res.partner'].browse(partner_id)
if partner_id != billing_partner_id:
billing_partner = self.env['res.partner'].browse(billing_partner_id)
else:
billing_partner = partner
values.update({
'partner': partner,
'partner_id': partner_id,
'partner_name': partner.name,
'partner_lang': partner.lang,
'partner_email': partner.email,
'partner_zip': partner.zip,
'partner_city': partner.city,
'partner_address': _partner_format_address(partner.street, partner.street2),
'partner_country_id': partner.country_id.id or self.env['res.company']._company_default_get().country_id.id,
'partner_country': partner.country_id,
'partner_phone': partner.phone,
'partner_state': partner.state_id,
'billing_partner': billing_partner,
'billing_partner_id': billing_partner_id,
'billing_partner_name': billing_partner.name,
'billing_partner_commercial_company_name': billing_partner.commercial_company_name,
'billing_partner_lang': billing_partner.lang,
'billing_partner_email': billing_partner.email,
'billing_partner_zip': billing_partner.zip,
'billing_partner_city': billing_partner.city,
'billing_partner_address': _partner_format_address(billing_partner.street, billing_partner.street2),
'billing_partner_country_id': billing_partner.country_id.id,
'billing_partner_country': billing_partner.country_id,
'billing_partner_phone': billing_partner.phone,
'billing_partner_state': billing_partner.state_id,
})
if values.get('partner_name'):
values.update({
'partner_first_name': _partner_split_name(values.get('partner_name'))[0],
'partner_last_name': _partner_split_name(values.get('partner_name'))[1],
})
if values.get('billing_partner_name'):
values.update({
'billing_partner_first_name': _partner_split_name(values.get('billing_partner_name'))[0],
'billing_partner_last_name': _partner_split_name(values.get('billing_partner_name'))[1],
})
# Fix address, country fields
if not values.get('partner_address'):
values['address'] = _partner_format_address(values.get('partner_street', ''), values.get('partner_street2', ''))
if not values.get('partner_country') and values.get('partner_country_id'):
values['country'] = self.env['res.country'].browse(values.get('partner_country_id'))
if not values.get('billing_partner_address'):
values['billing_address'] = _partner_format_address(values.get('billing_partner_street', ''), values.get('billing_partner_street2', ''))
if not values.get('billing_partner_country') and values.get('billing_partner_country_id'):
values['billing_country'] = self.env['res.country'].browse(values.get('billing_partner_country_id'))
# compute fees
fees_method_name = '%s_compute_fees' % self.provider
if hasattr(self, fees_method_name):
fees = getattr(self, fees_method_name)(values['amount'], values['currency_id'], values.get('partner_country_id'))
values['fees'] = float_round(fees, 2)
# call <name>_form_generate_values to update the tx dict with acquirer specific values
cust_method_name = '%s_form_generate_values' % (self.provider)
if hasattr(self, cust_method_name):
method = getattr(self, cust_method_name)
values = method(values)
values.update({
'tx_url': self._context.get('tx_url', self.get_form_action_url()),
'submit_class': self._context.get('submit_class', 'btn btn-link'),
'submit_txt': self._context.get('submit_txt'),
'acquirer': self,
'user': self.env.user,
'context': self._context,
'type': values.get('type') or 'form',
})
_logger.info('payment.acquirer.render: <%s> values rendered for form payment:\n%s', self.provider, pprint.pformat(values))
return self.view_template_id._render(values, engine='ir.qweb')
def get_s2s_form_xml_id(self):
if self.registration_view_template_id:
model_data = self.env['ir.model.data'].search([('model', '=', 'ir.ui.view'), ('res_id', '=', self.registration_view_template_id.id)])
return ('%s.%s') % (model_data.module, model_data.name)
return False
def s2s_process(self, data):
cust_method_name = '%s_s2s_form_process' % (self.provider)
if not self.s2s_validate(data):
return False
if hasattr(self, cust_method_name):
# As this method may be called in JSON and overridden in various addons
# let us raise interesting errors before having strange crashes
if not data.get('partner_id'):
raise ValueError(_('Missing partner reference when trying to create a new payment token'))
method = getattr(self, cust_method_name)
return method(data)
return True
def s2s_validate(self, data):
cust_method_name = '%s_s2s_form_validate' % (self.provider)
if hasattr(self, cust_method_name):
method = getattr(self, cust_method_name)
return method(data)
return True
def button_immediate_install(self):
# TDE FIXME: remove that brol
if self.module_id and self.module_state != 'installed':
self.module_id.button_immediate_install()
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
class PaymentIcon(models.Model):
_name = 'payment.icon'
_description = 'Payment Icon'
name = fields.Char(string='Name')
acquirer_ids = fields.Many2many('payment.acquirer', string="Acquirers", help="List of Acquirers supporting this payment icon.")
image = fields.Binary(
"Image", help="This field holds the image used for this payment icon, limited to 1024x1024px")
image_payment_form = fields.Binary(
"Image displayed on the payment form", attachment=True)
@api.model_create_multi
def create(self, vals_list):
for vals in vals_list:
if 'image' in vals:
image = ustr(vals['image'] or '').encode('utf-8')
vals['image_payment_form'] = image_process(image, size=(45,30))
vals['image'] = image_process(image, size=(64,64))
return super(PaymentIcon, self).create(vals_list)
def write(self, vals):
if 'image' in vals:
image = ustr(vals['image'] or '').encode('utf-8')
vals['image_payment_form'] = image_process(image, size=(45,30))
vals['image'] = image_process(image, size=(64,64))
return super(PaymentIcon, self).write(vals)
class PaymentTransaction(models.Model):
""" Transaction Model. Each specific acquirer can extend the model by adding
its own fields.
Methods that can be added in an acquirer-specific implementation:
- ``<name>_create``: method receiving values used when creating a new
transaction and that returns a dictionary that will update those values.
This method can be used to tweak some transaction values.
Methods defined for convention, depending on your controllers:
- ``<name>_form_feedback(self, data)``: method that handles the data coming
from the acquirer after the transaction. It generally receives data
posted by the acquirer after the transaction.
"""
_name = 'payment.transaction'
_description = 'Payment Transaction'
_order = 'id desc'
_rec_name = 'reference'
@api.model
def _lang_get(self):
return self.env['res.lang'].get_installed()
@api.model
def _get_default_partner_country_id(self):
return self.env.company.country_id.id
date = fields.Datetime('Validation Date', readonly=True)
acquirer_id = fields.Many2one('payment.acquirer', string='Acquirer', readonly=True, required=True)
provider = fields.Selection(string='Provider', related='acquirer_id.provider', readonly=True)
type = fields.Selection([
('validation', 'Validation of the bank card'),
('server2server', 'Server To Server'),
('form', 'Form'),
('form_save', 'Form with tokenization')], 'Type',
default='form', required=True, readonly=True)
state = fields.Selection([
('draft', 'Draft'),
('pending', 'Pending'),
('authorized', 'Authorized'),
('done', 'Done'),
('cancel', 'Canceled'),
('error', 'Error'),],
string='Status', copy=False, default='draft', required=True, readonly=True)
state_message = fields.Text(string='Message', readonly=True,
help='Field used to store error and/or validation messages for information')
amount = fields.Monetary(string='Amount', currency_field='currency_id', required=True, readonly=True)
fees = fields.Monetary(string='Fees', currency_field='currency_id', readonly=True,
help='Fees amount; set by the system because it depends on the acquirer')
currency_id = fields.Many2one('res.currency', 'Currency', required=True, readonly=True)
reference = fields.Char(string='Reference', required=True, readonly=True, index=True,
help='Internal reference of the TX')
acquirer_reference = fields.Char(string='Acquirer Reference', readonly=True, help='Reference of the TX as stored in the acquirer database')
# duplicate partner / transaction data to store the values at transaction time
partner_id = fields.Many2one('res.partner', 'Customer')
partner_name = fields.Char('Partner Name')
partner_lang = fields.Selection(_lang_get, 'Language', default=lambda self: self.env.lang)
partner_email = fields.Char('Email')
partner_zip = fields.Char('Zip')
partner_address = fields.Char('Address')
partner_city = fields.Char('City')
partner_country_id = fields.Many2one('res.country', 'Country', default=_get_default_partner_country_id, required=True)
partner_phone = fields.Char('Phone')
html_3ds = fields.Char('3D Secure HTML')
callback_model_id = fields.Many2one('ir.model', 'Callback Document Model', groups="base.group_system")
callback_res_id = fields.Integer('Callback Document ID', groups="base.group_system")
callback_method = fields.Char('Callback Method', groups="base.group_system")
callback_hash = fields.Char('Callback Hash', groups="base.group_system")
# Fields used for user redirection & payment post processing
return_url = fields.Char('Return URL after payment')
is_processed = fields.Boolean('Has the payment been post processed', default=False)
# Fields used for payment.transaction traceability.
payment_token_id = fields.Many2one('payment.token', 'Payment Token', readonly=True,
domain="[('acquirer_id', '=', acquirer_id)]")
payment_id = fields.Many2one('account.payment', string='Payment', readonly=True)
invoice_ids = fields.Many2many('account.move', 'account_invoice_transaction_rel', 'transaction_id', 'invoice_id',
string='Invoices', copy=False, readonly=True,
domain=[('move_type', 'in', ('out_invoice', 'out_refund', 'in_invoice', 'in_refund'))])
invoice_ids_nbr = fields.Integer(compute='_compute_invoice_ids_nbr', string='# of Invoices')
_sql_constraints = [
('reference_uniq', 'unique(reference)', 'Reference must be unique!'),
]
@api.depends('invoice_ids')
def _compute_invoice_ids_nbr(self):
for trans in self:
trans.invoice_ids_nbr = len(trans.invoice_ids)
def _create_payment(self, add_payment_vals=None):
''' Create an account.payment record for the current payment.transaction.
If the transaction is linked to some invoices, the reconciliation will be done automatically.
:param add_payment_vals: Optional additional values to be passed to the account.payment.create method.
:return: An account.payment record.
'''
self.ensure_one()
add_payment_vals = add_payment_vals or {}
payment_vals = {
'amount': self.amount,
'payment_type': 'inbound' if self.amount > 0 else 'outbound',
'currency_id': self.currency_id.id,
'partner_id': self.partner_id.id,
'partner_type': 'customer',
'journal_id': self.acquirer_id.journal_id.id,
'company_id': self.acquirer_id.company_id.id,
'payment_method_id': self.env.ref('payment.account_payment_method_electronic_in').id,
'payment_token_id': self.payment_token_id and self.payment_token_id.id or None,
'payment_transaction_id': self.id,
'ref': self.reference,
**add_payment_vals,
}
payment = self.env['account.payment'].create(payment_vals)
payment.action_post()
# Track the payment to make a one2one.
self.payment_id = payment
if self.invoice_ids:
self.invoice_ids.filtered(lambda move: move.state == 'draft').post()
(payment.line_ids + self.invoice_ids.line_ids)\
.filtered(lambda line: line.account_id == payment.destination_account_id and not line.reconciled)\
.reconcile()
return payment
def get_last_transaction(self):
transactions = self.filtered(lambda t: t.state != 'draft')
return transactions and transactions[0] or transactions
def _get_processing_info(self):
""" Extensible method for providers if they need specific fields/info regarding a tx in the payment processing page. """
return dict()
def _get_payment_transaction_sent_message(self):
self.ensure_one()
if self.payment_token_id:
message = _('A transaction %s with %s initiated using %s credit card.')
message_vals = (self.reference, self.acquirer_id.name, self.payment_token_id.name)
elif self.provider in ('manual', 'transfer'):
message = _('The customer has selected %s to pay this document.')
message_vals = (self.acquirer_id.name,)
else:
message = _('A transaction %s with %s initiated.')
message_vals = (self.reference, self.acquirer_id.name)
if self.provider not in ('manual', 'transfer'):
message += ' ' + _('Waiting for payment confirmation...')
return message % message_vals
def _get_payment_transaction_received_message(self):
self.ensure_one()
amount = formatLang(self.env, self.amount, currency_obj=self.currency_id)
message_vals = [self.reference, self.acquirer_id.name, amount]
if self.state == 'pending':
message = _('The transaction %s with %s for %s is pending.')
elif self.state == 'authorized':
message = _('The transaction %s with %s for %s has been authorized. Waiting for capture...')
elif self.state == 'done':
message = _('The transaction %s with %s for %s has been confirmed. The related payment is posted: %s')
message_vals.append(self.payment_id._get_payment_chatter_link())
elif self.state == 'cancel' and self.state_message:
message = _('The transaction %s with %s for %s has been cancelled with the following message: %s')
message_vals.append(self.state_message)
elif self.state == 'error' and self.state_message:
message = _('The transaction %s with %s for %s has failed with the following error message: %s')
message_vals.append(self.state_message)
else:
message = _('The transaction %s with %s for %s has been cancelled.')
return message % tuple(message_vals)
def _log_payment_transaction_sent(self):
'''Log the message saying the transaction has been sent to the remote server to be
processed by the acquirer.
'''
for trans in self:
post_message = trans._get_payment_transaction_sent_message()
for inv in trans.invoice_ids:
inv.message_post(body=post_message)
def _log_payment_transaction_received(self):
'''Log the message saying a response has been received from the remote server and some
additional information like the old/new state, the reference of the payment, etc.
:param old_state: The state of the transaction before the response.
:param add_messages: Optional additional messages to log like the capture status.
'''
for trans in self.filtered(lambda t: t.provider not in ('manual', 'transfer')):
post_message = trans._get_payment_transaction_received_message()
for inv in trans.invoice_ids:
inv.message_post(body=post_message)
def _filter_transaction_state(self, allowed_states, target_state):
"""Divide a set of transactions according to their state.
:param tuple(string) allowed_states: tuple of allowed states for the target state (strings)
:param string target_state: target state for the filtering
:return: tuple of transactions divided by their state, in that order
tx_to_process: tx that were in the allowed states
tx_already_processed: tx that were already in the target state
tx_wrong_state: tx that were not in the allowed state for the transition
:rtype: tuple(recordset)
"""
tx_to_process = self.filtered(lambda tx: tx.state in allowed_states)
tx_already_processed = self.filtered(lambda tx: tx.state == target_state)
tx_wrong_state = self - tx_to_process - tx_already_processed
return (tx_to_process, tx_already_processed, tx_wrong_state)
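# Worked example (states hypothetical): for transactions in ('draft', 'done',
# 'cancel') with allowed_states=('draft', 'pending') and target_state='done',
# the draft tx goes to tx_to_process, the done tx to tx_already_processed,
# and the cancelled tx to tx_wrong_state.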
def _set_transaction_pending(self):
'''Move the transaction to the pending state (e.g. Wire Transfer).'''
allowed_states = ('draft',)
target_state = 'pending'
(tx_to_process, tx_already_processed, tx_wrong_state) = self._filter_transaction_state(allowed_states, target_state)
for tx in tx_already_processed:
_logger.info('Trying to write the same state twice on tx (ref: %s, state: %s)' % (tx.reference, tx.state))
for tx in tx_wrong_state:
_logger.warning('Processed tx with abnormal state (ref: %s, target state: %s, previous state %s, expected previous states: %s)' % (tx.reference, target_state, tx.state, allowed_states))
tx_to_process.write({
'state': target_state,
'date': fields.Datetime.now(),
'state_message': '',
})
tx_to_process._log_payment_transaction_received()
def _set_transaction_authorized(self):
'''Move the transaction to the authorized state (e.g. Authorize).'''
allowed_states = ('draft', 'pending')
target_state = 'authorized'
(tx_to_process, tx_already_processed, tx_wrong_state) = self._filter_transaction_state(allowed_states, target_state)
for tx in tx_already_processed:
_logger.info('Trying to write the same state twice on tx (ref: %s, state: %s)' % (tx.reference, tx.state))
for tx in tx_wrong_state:
_logger.warning('Processed tx with abnormal state (ref: %s, target state: %s, previous state %s, expected previous states: %s)' % (tx.reference, target_state, tx.state, allowed_states))
tx_to_process.write({
'state': target_state,
'date': fields.Datetime.now(),
'state_message': '',
})
tx_to_process._log_payment_transaction_received()
def _set_transaction_done(self):
'''Move the transaction's payment to the done state (e.g. Paypal).'''
allowed_states = ('draft', 'authorized', 'pending', 'error')
target_state = 'done'
(tx_to_process, tx_already_processed, tx_wrong_state) = self._filter_transaction_state(allowed_states, target_state)
for tx in tx_already_processed:
_logger.info('Trying to write the same state twice on tx (ref: %s, state: %s)' % (tx.reference, tx.state))
for tx in tx_wrong_state:
_logger.warning('Processed tx with abnormal state (ref: %s, target state: %s, previous state %s, expected previous states: %s)' % (tx.reference, target_state, tx.state, allowed_states))
tx_to_process.write({
'state': target_state,
'date': fields.Datetime.now(),
'state_message': '',
})
def _reconcile_after_transaction_done(self):
# Validate invoices automatically once the transaction is posted.
invoices = self.mapped('invoice_ids').filtered(lambda inv: inv.state == 'draft')
invoices.post()
# Create & Post the payments.
for trans in self:
if trans.payment_id:
continue
trans._create_payment()
def _set_transaction_cancel(self):
'''Move the transaction's payment to the cancel state (e.g. Paypal).'''
allowed_states = ('draft', 'authorized')
target_state = 'cancel'
(tx_to_process, tx_already_processed, tx_wrong_state) = self._filter_transaction_state(allowed_states, target_state)
for tx in tx_already_processed:
_logger.info('Trying to write the same state twice on tx (ref: %s, state: %s)' % (tx.reference, tx.state))
for tx in tx_wrong_state:
_logger.warning('Processed tx with abnormal state (ref: %s, target state: %s, previous state %s, expected previous states: %s)' % (tx.reference, target_state, tx.state, allowed_states))
# Cancel the existing payments.
tx_to_process.mapped('payment_id').action_cancel()
tx_to_process.write({'state': target_state, 'date': fields.Datetime.now()})
tx_to_process._log_payment_transaction_received()
def _set_transaction_error(self, msg):
'''Move the transaction to the error state (Third party returning error e.g. Paypal).'''
allowed_states = ('draft', 'authorized', 'pending')
target_state = 'error'
(tx_to_process, tx_already_processed, tx_wrong_state) = self._filter_transaction_state(allowed_states, target_state)
for tx in tx_already_processed:
_logger.info('Trying to write the same state twice on tx (ref: %s, state: %s)' % (tx.reference, tx.state))
for tx in tx_wrong_state:
_logger.warning('Processed tx with abnormal state (ref: %s, target state: %s, previous state %s, expected previous states: %s)' % (tx.reference, target_state, tx.state, allowed_states))
tx_to_process.write({
'state': target_state,
'date': fields.Datetime.now(),
'state_message': msg,
})
self._log_payment_transaction_received()
def _post_process_after_done(self):
self._reconcile_after_transaction_done()
self._log_payment_transaction_received()
self.write({'is_processed': True})
return True
def _cron_post_process_after_done(self):
if not self:
ten_minutes_ago = datetime.now() - relativedelta.relativedelta(minutes=10)
# we don't want to forever try to process a transaction that doesn't go through
retry_limit_date = datetime.now() - relativedelta.relativedelta(days=2)
# we retrieve all the payment tx that need to be post processed
self = self.search([('state', '=', 'done'),
('is_processed', '=', False),
('date', '<=', ten_minutes_ago),
('date', '>=', retry_limit_date),
])
for tx in self:
try:
tx._post_process_after_done()
self.env.cr.commit()
except Exception as e:
_logger.exception("Transaction post processing failed")
self.env.cr.rollback()
@api.model
def _compute_reference_prefix(self, values):
if values and values.get('invoice_ids'):
invoices = self.new({'invoice_ids': values['invoice_ids']}).invoice_ids
return ','.join(invoices.mapped('name'))
return None
@api.model
def _compute_reference(self, values=None, prefix=None):
'''Compute a unique reference for the transaction.
If prefix:
prefix-\d+
If some invoices:
<inv_number_0>,<inv_number_1>,...,<inv_number_n>-x
If some sale orders:
<so_name_0>,<so_name_1>,...,<so_name_n>-x
Else:
tx-\d+
:param values: values used to create a new transaction.
:param prefix: custom transaction prefix.
:return: A unique reference for the transaction.
'''
if not prefix:
if values:
prefix = self._compute_reference_prefix(values)
else:
prefix = 'tx'
# Fetch the last reference
# E.g. If the last reference is SO42-5, this query will return '-5'
self._cr.execute('''
SELECT CAST(SUBSTRING(reference FROM '-\d+$') AS INTEGER) AS suffix
FROM payment_transaction WHERE reference LIKE %s ORDER BY suffix
''', [prefix + '-%'])
query_res = self._cr.fetchone()
if query_res:
# Increment the last reference by one
suffix = '%s' % (-query_res[0] + 1)
else:
# Start a new indexing from 1
suffix = '1'
return '%s-%s' % (prefix, suffix)
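# Worked example (references hypothetical): with existing references
# 'SO42-1' and 'SO42-2', the query returns the suffix -2 first (most
# negative), so the next reference is 'SO42-%s' % (-(-2) + 1) == 'SO42-3'.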
def action_view_invoices(self):
action = {
'name': _('Invoices'),
'type': 'ir.actions.act_window',
'res_model': 'account.move',
'target': 'current',
}
invoice_ids = self.invoice_ids.ids
if len(invoice_ids) == 1:
invoice = invoice_ids[0]
action['res_id'] = invoice
action['view_mode'] = 'form'
form_view = [(self.env.ref('account.view_move_form').id, 'form')]
if 'views' in action:
action['views'] = form_view + [(state,view) for state,view in action['views'] if view != 'form']
else:
action['views'] = form_view
else:
action['view_mode'] = 'tree,form'
action['domain'] = [('id', 'in', invoice_ids)]
return action
@api.constrains('state', 'acquirer_id')
def _check_authorize_state(self):
failed_tx = self.filtered(lambda tx: tx.state == 'authorized' and tx.acquirer_id.provider not in self.env['payment.acquirer']._get_feature_support()['authorize'])
if failed_tx:
raise exceptions.ValidationError(_('The %s payment acquirers do not support manual capture mode!') % failed_tx.mapped('acquirer_id.name'))
@api.model
def create(self, values):
# call custom create method if defined
acquirer = self.env['payment.acquirer'].browse(values['acquirer_id'])
if values.get('partner_id'):
partner = self.env['res.partner'].browse(values['partner_id'])
values.update({
'partner_name': partner.name,
'partner_lang': partner.lang or self.env.user.lang,
'partner_email': partner.email,
'partner_zip': partner.zip,
'partner_address': _partner_format_address(partner.street or '', partner.street2 or ''),
'partner_city': partner.city,
'partner_country_id': partner.country_id.id or self._get_default_partner_country_id(),
'partner_phone': partner.phone,
})
# compute fees
custom_method_name = '%s_compute_fees' % acquirer.provider
if hasattr(acquirer, custom_method_name):
fees = getattr(acquirer, custom_method_name)(
values.get('amount', 0.0), values.get('currency_id'), values['partner_country_id'])
values['fees'] = fees
# custom create
custom_method_name = '%s_create' % acquirer.provider
if hasattr(self, custom_method_name):
values.update(getattr(self, custom_method_name)(values))
if not values.get('reference'):
values['reference'] = self._compute_reference(values=values)
# The default value of 'reference' was computed just above when not provided.
tx = super(PaymentTransaction, self).create(values)
# Generate callback hash if it is configured on the tx; avoid generating unnecessary stuff
# (limited sudo env for checking callback presence, must work for manual transactions too)
tx_sudo = tx.sudo()
if tx_sudo.callback_model_id and tx_sudo.callback_res_id and tx_sudo.callback_method:
tx.write({'callback_hash': tx._generate_callback_hash()})
return tx
def _generate_callback_hash(self):
self.ensure_one()
secret = self.env['ir.config_parameter'].sudo().get_param('database.secret')
token = '%s%s%s' % (self.callback_model_id.model,
self.callback_res_id,
self.sudo().callback_method)
return hmac.new(secret.encode('utf-8'), token.encode('utf-8'), hashlib.sha256).hexdigest()
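# Hedged note: execute_callback() below recomputes this HMAC and compares it
# with consteq() against the stored callback_hash, so any change to the
# callback model, record id or method invalidates the signature.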
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
@api.model
def form_feedback(self, data, acquirer_name):
invalid_parameters, tx = None, None
tx_find_method_name = '_%s_form_get_tx_from_data' % acquirer_name
if hasattr(self, tx_find_method_name):
tx = getattr(self, tx_find_method_name)(data)
# TDE TODO: form_get_invalid_parameters from model to multi
invalid_param_method_name = '_%s_form_get_invalid_parameters' % acquirer_name
if hasattr(self, invalid_param_method_name):
invalid_parameters = getattr(tx, invalid_param_method_name)(data)
if invalid_parameters:
_error_message = '%s: incorrect tx data:\n' % (acquirer_name)
for item in invalid_parameters:
_error_message += '\t%s: received %s instead of %s\n' % (item[0], item[1], item[2])
_logger.error(_error_message)
return False
# TDE TODO: form_validate from model to multi
feedback_method_name = '_%s_form_validate' % acquirer_name
if hasattr(self, feedback_method_name):
return getattr(tx, feedback_method_name)(data)
return True
# --------------------------------------------------
# SERVER2SERVER RELATED METHODS
# --------------------------------------------------
def s2s_do_transaction(self, **kwargs):
custom_method_name = '%s_s2s_do_transaction' % self.acquirer_id.provider
for trans in self:
trans._log_payment_transaction_sent()
if hasattr(trans, custom_method_name):
return getattr(trans, custom_method_name)(**kwargs)
def s2s_do_refund(self, **kwargs):
custom_method_name = '%s_s2s_do_refund' % self.acquirer_id.provider
if hasattr(self, custom_method_name):
return getattr(self, custom_method_name)(**kwargs)
def s2s_capture_transaction(self, **kwargs):
custom_method_name = '%s_s2s_capture_transaction' % self.acquirer_id.provider
if hasattr(self, custom_method_name):
return getattr(self, custom_method_name)(**kwargs)
def s2s_void_transaction(self, **kwargs):
custom_method_name = '%s_s2s_void_transaction' % self.acquirer_id.provider
if hasattr(self, custom_method_name):
return getattr(self, custom_method_name)(**kwargs)
def s2s_get_tx_status(self):
""" Get the tx status. """
invalid_param_method_name = '_%s_s2s_get_tx_status' % self.acquirer_id.provider
if hasattr(self, invalid_param_method_name):
return getattr(self, invalid_param_method_name)()
return True
def execute_callback(self):
res = None
for transaction in self:
# limited sudo env, only for checking callback presence, not for running it!
# manual transactions have no callback, and can pass without being run by admin user
tx_sudo = transaction.sudo()
if not (tx_sudo.callback_model_id and tx_sudo.callback_res_id and tx_sudo.callback_method):
continue
valid_token = transaction._generate_callback_hash()
if not consteq(ustr(valid_token), transaction.callback_hash):
_logger.warning("Invalid callback signature for transaction %d" % (transaction.id))
continue
record = self.env[transaction.callback_model_id.model].browse(transaction.callback_res_id).exists()
if record:
res = getattr(record, transaction.callback_method)(transaction)
else:
_logger.warning("Did not found record %s.%s for callback of transaction %d" % (transaction.callback_model_id.model, transaction.callback_res_id, transaction.id))
return res
def action_capture(self):
if any([t.state != 'authorized' for t in self]):
            raise ValidationError(_('Only transactions in the authorized state can be captured.'))
for tx in self:
tx.s2s_capture_transaction()
def action_void(self):
if any([t.state != 'authorized' for t in self]):
            raise ValidationError(_('Only transactions in the authorized state can be voided.'))
for tx in self:
tx.s2s_void_transaction()
class PaymentToken(models.Model):
_name = 'payment.token'
_order = 'partner_id, id desc'
_description = 'Payment Token'
name = fields.Char('Name', help='Name of the payment token')
short_name = fields.Char('Short name', compute='_compute_short_name')
partner_id = fields.Many2one('res.partner', 'Partner', required=True)
acquirer_id = fields.Many2one('payment.acquirer', 'Acquirer Account', required=True)
company_id = fields.Many2one(related='acquirer_id.company_id', store=True, index=True)
acquirer_ref = fields.Char('Acquirer Ref.', required=True)
active = fields.Boolean('Active', default=True)
payment_ids = fields.One2many('payment.transaction', 'payment_token_id', 'Payment Transactions')
verified = fields.Boolean(string='Verified', default=False)
@api.model
def create(self, values):
# call custom create method if defined
if values.get('acquirer_id'):
acquirer = self.env['payment.acquirer'].browse(values['acquirer_id'])
# custom create
custom_method_name = '%s_create' % acquirer.provider
if hasattr(self, custom_method_name):
values.update(getattr(self, custom_method_name)(values))
# remove all non-model fields used by (provider)_create method to avoid warning
fields_wl = set(self._fields) & set(values)
values = {field: values[field] for field in fields_wl}
return super(PaymentToken, self).create(values)
"""
    @TBE: stolen shamelessly from https://www.paypal.com/us/selfhelp/article/why-is-there-a-$1.95-charge-on-my-card-statement-faq554
    Most of them are around €1.50
TODO: See this with @AL & @DBO
"""
VALIDATION_AMOUNTS = {
'CAD': 2.45,
'EUR': 1.50,
'GBP': 1.00,
'JPY': 200,
'AUD': 2.00,
'NZD': 3.00,
'CHF': 3.00,
'HKD': 15.00,
'SEK': 15.00,
'DKK': 12.50,
'PLN': 6.50,
'NOK': 15.00,
'HUF': 400.00,
'CZK': 50.00,
'BRL': 4.00,
'MYR': 10.00,
'MXN': 20.00,
'ILS': 8.00,
'PHP': 100.00,
'TWD': 70.00,
'THB': 70.00
}
@api.model
def validate(self, **kwargs):
"""
        This method allows verifying whether this payment method is valid or not.
        It does this by withdrawing a small amount and then refunding it right after.
"""
currency = self.partner_id.currency_id
        amount = self.VALIDATION_AMOUNTS.get(currency.name)
        if not amount:
            # If we don't find the user's currency, then we set the currency to EUR and the amount to €1.50.
            currency = self.env['res.currency'].search([('name', '=', 'EUR')])
            amount = 1.5
if len(currency) != 1:
_logger.error("Error 'EUR' currency not found for payment method validation!")
return False
reference = "VALIDATION-%s-%s" % (self.id, datetime.now().strftime('%y%m%d_%H%M%S'))
tx = self.env['payment.transaction'].sudo().create({
'amount': amount,
'acquirer_id': self.acquirer_id.id,
'type': 'validation',
'currency_id': currency.id,
'reference': reference,
'payment_token_id': self.id,
'partner_id': self.partner_id.id,
'partner_country_id': self.partner_id.country_id.id,
'state_message': _('This Transaction was automatically processed & refunded in order to validate a new credit card.'),
})
kwargs.update({'3d_secure': True})
tx.s2s_do_transaction(**kwargs)
# if 3D secure is called, then we do not refund right now
if not tx.html_3ds:
tx.s2s_do_refund()
return tx
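    # Hedged usage sketch (illustrative): validating a freshly created token
    # from business code, assuming the acquirer supports s2s transactions.
    #
    #   tx = token.validate()
    #   if tx and tx.state == 'done':
    #       token.verified = True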
@api.depends('name')
def _compute_short_name(self):
for token in self:
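            # e.g. a masked card number like 'XXXXXXXXXXXX4242' becomes '***4242'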
token.short_name = token.name.replace('XXXXXXXXXXXX', '***')
def get_linked_records(self):
""" This method returns a dict containing all the records linked to the payment.token (e.g Subscriptions),
the key is the id of the payment.token and the value is an array that must follow the scheme below.
{
token_id: [
'description': The model description (e.g 'Sale Subscription'),
'id': The id of the record,
'name': The name of the record,
'url': The url to access to this record.
]
}
"""
return {r.id:[] for r in self}
| agpl-3.0 |
lanbing510/GTDWeb | django/contrib/gis/gdal/prototypes/raster.py | 67 | 3694 | """
This module houses the ctypes function prototypes for GDAL DataSource (raster)
related data structures.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p
from functools import partial
from django.contrib.gis.gdal.libgdal import std_call
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, int_output, void_output,
voidptr_output,
)
# For more details about C function names and definitions see
# http://gdal.org/gdal_8h.html
# http://gdal.org/gdalwarper_8h.html
# Prepare partial functions that use cpl error codes
void_output = partial(void_output, cpl=True)
const_string_output = partial(const_string_output, cpl=True)
double_output = partial(double_output, cpl=True)
# Raster Driver Routines
register_all = void_output(std_call('GDALAllRegister'), [])
get_driver = voidptr_output(std_call('GDALGetDriver'), [c_int])
get_driver_by_name = voidptr_output(std_call('GDALGetDriverByName'), [c_char_p], errcheck=False)
get_driver_count = int_output(std_call('GDALGetDriverCount'), [])
get_driver_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
# Raster Data Source Routines
create_ds = voidptr_output(std_call('GDALCreate'), [c_void_p, c_char_p, c_int, c_int, c_int, c_int, c_void_p])
open_ds = voidptr_output(std_call('GDALOpen'), [c_char_p, c_int])
close_ds = void_output(std_call('GDALClose'), [c_void_p])
copy_ds = voidptr_output(std_call('GDALCreateCopy'),
[c_void_p, c_char_p, c_void_p, c_int, POINTER(c_char_p), c_void_p, c_void_p]
)
add_band_ds = void_output(std_call('GDALAddBand'), [c_void_p, c_int])
get_ds_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_ds_driver = voidptr_output(std_call('GDALGetDatasetDriver'), [c_void_p])
get_ds_xsize = int_output(std_call('GDALGetRasterXSize'), [c_void_p])
get_ds_ysize = int_output(std_call('GDALGetRasterYSize'), [c_void_p])
get_ds_raster_count = int_output(std_call('GDALGetRasterCount'), [c_void_p])
get_ds_raster_band = voidptr_output(std_call('GDALGetRasterBand'), [c_void_p, c_int])
get_ds_projection_ref = const_string_output(std_call('GDALGetProjectionRef'), [c_void_p])
set_ds_projection_ref = void_output(std_call('GDALSetProjection'), [c_void_p, c_char_p])
get_ds_geotransform = void_output(std_call('GDALGetGeoTransform'), [c_void_p, POINTER(c_double * 6)], errcheck=False)
set_ds_geotransform = void_output(std_call('GDALSetGeoTransform'), [c_void_p, POINTER(c_double * 6)])
# Raster Band Routines
band_io = void_output(std_call('GDALRasterIO'),
[c_void_p, c_int, c_int, c_int, c_int, c_int, c_void_p, c_int, c_int, c_int, c_int, c_int]
)
get_band_xsize = int_output(std_call('GDALGetRasterBandXSize'), [c_void_p])
get_band_ysize = int_output(std_call('GDALGetRasterBandYSize'), [c_void_p])
get_band_index = int_output(std_call('GDALGetBandNumber'), [c_void_p])
get_band_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_band_ds = voidptr_output(std_call('GDALGetBandDataset'), [c_void_p])
get_band_datatype = int_output(std_call('GDALGetRasterDataType'), [c_void_p])
get_band_nodata_value = double_output(std_call('GDALGetRasterNoDataValue'), [c_void_p, POINTER(c_int)])
set_band_nodata_value = void_output(std_call('GDALSetRasterNoDataValue'), [c_void_p, c_double])
get_band_minimum = double_output(std_call('GDALGetRasterMinimum'), [c_void_p, POINTER(c_int)])
get_band_maximum = double_output(std_call('GDALGetRasterMaximum'), [c_void_p, POINTER(c_int)])
# Reprojection routine
reproject_image = void_output(std_call('GDALReprojectImage'),
[c_void_p, c_char_p, c_void_p, c_char_p, c_int, c_double, c_double, c_void_p, c_void_p, c_void_p]
)
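# Hedged usage sketch (illustrative, not part of this module): these ctypes
# prototypes are consumed by django.contrib.gis.gdal.raster; a direct call
# would look roughly like the following (0 is GDAL's read-only access flag).
#
#   register_all()
#   ds = open_ds(b'/path/to/raster.tif', 0)
#   band_count = get_ds_raster_count(ds)
#   close_ds(ds)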
| gpl-2.0 |
daniponi/django | tests/template_tests/syntax_tests/test_if.py | 13 | 24184 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import TestObj, setup
class IfTagTests(SimpleTestCase):
@setup({'if-tag01': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag01(self):
output = self.engine.render_to_string('if-tag01', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag02': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag02(self):
output = self.engine.render_to_string('if-tag02', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag03': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag03(self):
output = self.engine.render_to_string('if-tag03')
self.assertEqual(output, 'no')
@setup({'if-tag04': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag04(self):
output = self.engine.render_to_string('if-tag04', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag05': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag05(self):
output = self.engine.render_to_string('if-tag05', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag06': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag06(self):
output = self.engine.render_to_string('if-tag06')
self.assertEqual(output, '')
@setup({'if-tag07': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag07(self):
output = self.engine.render_to_string('if-tag07', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag08': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag08(self):
output = self.engine.render_to_string('if-tag08', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag09': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag09(self):
output = self.engine.render_to_string('if-tag09')
self.assertEqual(output, 'nothing')
@setup({'if-tag10': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag10(self):
output = self.engine.render_to_string('if-tag10', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag11': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag11(self):
output = self.engine.render_to_string('if-tag11', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag12': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag12(self):
output = self.engine.render_to_string('if-tag12', {'baz': True})
self.assertEqual(output, 'baz')
@setup({'if-tag13': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag13(self):
output = self.engine.render_to_string('if-tag13')
self.assertEqual(output, 'nothing')
# Filters
@setup({'if-tag-filter01': '{% if foo|length == 5 %}yes{% else %}no{% endif %}'})
def test_if_tag_filter01(self):
output = self.engine.render_to_string('if-tag-filter01', {'foo': 'abcde'})
self.assertEqual(output, 'yes')
@setup({'if-tag-filter02': '{% if foo|upper == \'ABC\' %}yes{% else %}no{% endif %}'})
def test_if_tag_filter02(self):
output = self.engine.render_to_string('if-tag-filter02')
self.assertEqual(output, 'no')
# Equality
@setup({'if-tag-eq01': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq01(self):
output = self.engine.render_to_string('if-tag-eq01')
self.assertEqual(output, 'yes')
@setup({'if-tag-eq02': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq02(self):
output = self.engine.render_to_string('if-tag-eq02', {'foo': 1})
self.assertEqual(output, 'no')
@setup({'if-tag-eq03': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq03(self):
output = self.engine.render_to_string('if-tag-eq03', {'foo': 1, 'bar': 1})
self.assertEqual(output, 'yes')
@setup({'if-tag-eq04': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq04(self):
output = self.engine.render_to_string('if-tag-eq04', {'foo': 1, 'bar': 2})
self.assertEqual(output, 'no')
@setup({'if-tag-eq05': '{% if foo == \'\' %}yes{% else %}no{% endif %}'})
def test_if_tag_eq05(self):
output = self.engine.render_to_string('if-tag-eq05')
self.assertEqual(output, 'no')
# Comparison
@setup({'if-tag-gt-01': '{% if 2 > 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gt_01(self):
output = self.engine.render_to_string('if-tag-gt-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-gt-02': '{% if 1 > 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gt_02(self):
output = self.engine.render_to_string('if-tag-gt-02')
self.assertEqual(output, 'no')
@setup({'if-tag-gte-01': '{% if 1 >= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gte_01(self):
output = self.engine.render_to_string('if-tag-gte-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-gte-02': '{% if 1 >= 2 %}yes{% else %}no{% endif %}'})
def test_if_tag_gte_02(self):
output = self.engine.render_to_string('if-tag-gte-02')
self.assertEqual(output, 'no')
@setup({'if-tag-lt-01': '{% if 1 < 2 %}yes{% else %}no{% endif %}'})
def test_if_tag_lt_01(self):
output = self.engine.render_to_string('if-tag-lt-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-lt-02': '{% if 1 < 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lt_02(self):
output = self.engine.render_to_string('if-tag-lt-02')
self.assertEqual(output, 'no')
@setup({'if-tag-lte-01': '{% if 1 <= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lte_01(self):
output = self.engine.render_to_string('if-tag-lte-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-lte-02': '{% if 2 <= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lte_02(self):
output = self.engine.render_to_string('if-tag-lte-02')
self.assertEqual(output, 'no')
# Contains
@setup({'if-tag-in-01': '{% if 1 in x %}yes{% else %}no{% endif %}'})
def test_if_tag_in_01(self):
output = self.engine.render_to_string('if-tag-in-01', {'x': [1]})
self.assertEqual(output, 'yes')
@setup({'if-tag-in-02': '{% if 2 in x %}yes{% else %}no{% endif %}'})
def test_if_tag_in_02(self):
output = self.engine.render_to_string('if-tag-in-02', {'x': [1]})
self.assertEqual(output, 'no')
@setup({'if-tag-not-in-01': '{% if 1 not in x %}yes{% else %}no{% endif %}'})
def test_if_tag_not_in_01(self):
output = self.engine.render_to_string('if-tag-not-in-01', {'x': [1]})
self.assertEqual(output, 'no')
@setup({'if-tag-not-in-02': '{% if 2 not in x %}yes{% else %}no{% endif %}'})
def test_if_tag_not_in_02(self):
output = self.engine.render_to_string('if-tag-not-in-02', {'x': [1]})
self.assertEqual(output, 'yes')
# AND
@setup({'if-tag-and01': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and01(self):
output = self.engine.render_to_string('if-tag-and01', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-and02': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and02(self):
output = self.engine.render_to_string('if-tag-and02', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and03': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and03(self):
output = self.engine.render_to_string('if-tag-and03', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-and04': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and04(self):
output = self.engine.render_to_string('if-tag-and04', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and05': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and05(self):
output = self.engine.render_to_string('if-tag-and05', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and06': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and06(self):
output = self.engine.render_to_string('if-tag-and06', {'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and07': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and07(self):
output = self.engine.render_to_string('if-tag-and07', {'foo': True})
self.assertEqual(output, 'no')
@setup({'if-tag-and08': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and08(self):
output = self.engine.render_to_string('if-tag-and08', {'bar': True})
self.assertEqual(output, 'no')
# OR
@setup({'if-tag-or01': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or01(self):
output = self.engine.render_to_string('if-tag-or01', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or02': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or02(self):
output = self.engine.render_to_string('if-tag-or02', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-or03': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or03(self):
output = self.engine.render_to_string('if-tag-or03', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or04': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or04(self):
output = self.engine.render_to_string('if-tag-or04', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or05': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or05(self):
output = self.engine.render_to_string('if-tag-or05', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or06': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or06(self):
output = self.engine.render_to_string('if-tag-or06', {'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or07': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or07(self):
output = self.engine.render_to_string('if-tag-or07', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or08': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or08(self):
output = self.engine.render_to_string('if-tag-or08', {'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or09': '{% if foo or bar or baz %}yes{% else %}no{% endif %}'})
def test_if_tag_or09(self):
"""
multiple ORs
"""
output = self.engine.render_to_string('if-tag-or09', {'baz': True})
self.assertEqual(output, 'yes')
# NOT
@setup({'if-tag-not01': '{% if not foo %}no{% else %}yes{% endif %}'})
def test_if_tag_not01(self):
output = self.engine.render_to_string('if-tag-not01', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not02': '{% if not not foo %}no{% else %}yes{% endif %}'})
def test_if_tag_not02(self):
output = self.engine.render_to_string('if-tag-not02', {'foo': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not06': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not06(self):
output = self.engine.render_to_string('if-tag-not06')
self.assertEqual(output, 'no')
@setup({'if-tag-not07': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not07(self):
output = self.engine.render_to_string('if-tag-not07', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not08': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not08(self):
output = self.engine.render_to_string('if-tag-not08', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not09': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not09(self):
output = self.engine.render_to_string('if-tag-not09', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not10': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not10(self):
output = self.engine.render_to_string('if-tag-not10', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not11': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not11(self):
output = self.engine.render_to_string('if-tag-not11')
self.assertEqual(output, 'no')
@setup({'if-tag-not12': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not12(self):
output = self.engine.render_to_string('if-tag-not12', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not13': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not13(self):
output = self.engine.render_to_string('if-tag-not13', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not14': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not14(self):
output = self.engine.render_to_string('if-tag-not14', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not15': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not15(self):
output = self.engine.render_to_string('if-tag-not15', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not16': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not16(self):
output = self.engine.render_to_string('if-tag-not16')
self.assertEqual(output, 'yes')
@setup({'if-tag-not17': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not17(self):
output = self.engine.render_to_string('if-tag-not17', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not18': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not18(self):
output = self.engine.render_to_string('if-tag-not18', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not19': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not19(self):
output = self.engine.render_to_string('if-tag-not19', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not20': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not20(self):
output = self.engine.render_to_string('if-tag-not20', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not21': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not21(self):
output = self.engine.render_to_string('if-tag-not21')
self.assertEqual(output, 'yes')
@setup({'if-tag-not22': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not22(self):
output = self.engine.render_to_string('if-tag-not22', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not23': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not23(self):
output = self.engine.render_to_string('if-tag-not23', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not24': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not24(self):
output = self.engine.render_to_string('if-tag-not24', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not25': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not25(self):
output = self.engine.render_to_string('if-tag-not25', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not26': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not26(self):
output = self.engine.render_to_string('if-tag-not26')
self.assertEqual(output, 'yes')
@setup({'if-tag-not27': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not27(self):
output = self.engine.render_to_string('if-tag-not27', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not28': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not28(self):
output = self.engine.render_to_string('if-tag-not28', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not29': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not29(self):
output = self.engine.render_to_string('if-tag-not29', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not30': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not30(self):
output = self.engine.render_to_string('if-tag-not30', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not31': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not31(self):
output = self.engine.render_to_string('if-tag-not31')
self.assertEqual(output, 'yes')
@setup({'if-tag-not32': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not32(self):
output = self.engine.render_to_string('if-tag-not32', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not33': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not33(self):
output = self.engine.render_to_string('if-tag-not33', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not34': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not34(self):
output = self.engine.render_to_string('if-tag-not34', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not35': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not35(self):
output = self.engine.render_to_string('if-tag-not35', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
# Various syntax errors
@setup({'if-tag-error01': '{% if %}yes{% endif %}'})
def test_if_tag_error01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error01')
@setup({'if-tag-error02': '{% if foo and %}yes{% else %}no{% endif %}'})
def test_if_tag_error02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error02', {'foo': True})
@setup({'if-tag-error03': '{% if foo or %}yes{% else %}no{% endif %}'})
def test_if_tag_error03(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error03', {'foo': True})
@setup({'if-tag-error04': '{% if not foo and %}yes{% else %}no{% endif %}'})
def test_if_tag_error04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error04', {'foo': True})
@setup({'if-tag-error05': '{% if not foo or %}yes{% else %}no{% endif %}'})
def test_if_tag_error05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error05', {'foo': True})
@setup({'if-tag-error06': '{% if abc def %}yes{% endif %}'})
def test_if_tag_error06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error06')
@setup({'if-tag-error07': '{% if not %}yes{% endif %}'})
def test_if_tag_error07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error07')
@setup({'if-tag-error08': '{% if and %}yes{% endif %}'})
def test_if_tag_error08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error08')
@setup({'if-tag-error09': '{% if or %}yes{% endif %}'})
def test_if_tag_error09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error09')
@setup({'if-tag-error10': '{% if == %}yes{% endif %}'})
def test_if_tag_error10(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error10')
@setup({'if-tag-error11': '{% if 1 == %}yes{% endif %}'})
def test_if_tag_error11(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error11')
@setup({'if-tag-error12': '{% if a not b %}yes{% endif %}'})
def test_if_tag_error12(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error12')
@setup({'if-tag-shortcircuit01': '{% if x.is_true or x.is_bad %}yes{% else %}no{% endif %}'})
def test_if_tag_shortcircuit01(self):
"""
        The if tag's boolean expressions are short-circuited where possible.
"""
output = self.engine.render_to_string('if-tag-shortcircuit01', {'x': TestObj()})
self.assertEqual(output, 'yes')
@setup({'if-tag-shortcircuit02': '{% if x.is_false and x.is_bad %}yes{% else %}no{% endif %}'})
def test_if_tag_shortcircuit02(self):
"""
The is_bad() function should not be evaluated. If it is, an
exception is raised.
"""
output = self.engine.render_to_string('if-tag-shortcircuit02', {'x': TestObj()})
self.assertEqual(output, 'no')
@setup({'if-tag-badarg01': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg01(self):
"""
Non-existent args
"""
output = self.engine.render_to_string('if-tag-badarg01')
self.assertEqual(output, '')
@setup({'if-tag-badarg02': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg02(self):
output = self.engine.render_to_string('if-tag-badarg02', {'y': 0})
self.assertEqual(output, '')
@setup({'if-tag-badarg03': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg03(self):
output = self.engine.render_to_string('if-tag-badarg03', {'y': 1})
self.assertEqual(output, 'yes')
@setup({'if-tag-badarg04': '{% if x|default_if_none:y %}yes{% else %}no{% endif %}'})
def test_if_tag_badarg04(self):
output = self.engine.render_to_string('if-tag-badarg04')
self.assertEqual(output, 'no')
@setup({'if-tag-single-eq': '{% if foo = bar %}yes{% else %}no{% endif %}'})
def test_if_tag_single_eq(self):
# A single equals sign is a syntax error.
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-single-eq', {'foo': 1})
@setup({'template': '{% if foo is True %}yes{% else %}no{% endif %}'})
def test_if_is_match(self):
output = self.engine.render_to_string('template', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'template': '{% if foo is True %}yes{% else %}no{% endif %}'})
def test_if_is_no_match(self):
output = self.engine.render_to_string('template', {'foo': 1})
self.assertEqual(output, 'no')
| bsd-3-clause |
cosurgi/trunk | examples/HydroForceEngine/twoWayCoupling/sedimentTransportExample_1DRANSCoupling.py | 2 | 13901 | from __future__ import print_function
#########################################################################################################################################################################
# Author: Raphael Maurin, raphael.maurin@imft.fr
# 24/11/2017
#
# Same as sedimentTransportExample but solving a 1D volume averaged fluid momentum balance to determine the fluid velocity profile (i.e. DEM-1D RANS coupling)
# The resolution therefore includes a two-way coupling in time between the fluid and the particle behavior, meaning that the fluid is solved every "fluidResolPeriod"
# and accounts for the presence of particles and for the momentum transferred to the particle phase through the hydrodynamic forces imposed. The fluid-particle system momentum
# is therefore conserved.
#
# Data can be saved and plotted with the file postProcessing.py
#
############################################################################################################################################################################
#Import libraries
from builtins import range
from yade import pack, plot
import math
import random as rand
import numpy as np
##
## Main parameters of the simulation
##
#Particles
diameterPart = 6e-3 #Diameter of the particles, in m
densPart = 2500 #density of the particles, in kg/m3
phiPartMax = 0.61 #Value of the dense packing solid volume fraction, dimensionless
restitCoef = 0.7 #Restitution coefficient of the particles, dimensionless
partFrictAngle = atan(0.5) #friction angle of the particles, in radian
#Fluid
densFluidPY = 1000. #Density of the fluid, in kg/m^3
kinematicViscoFluid = 1e-6 #kinematic viscosity of the fluid, in m^2/s
waterDepth = 17.5 #Water depth, in diameter
dtFluid = 1e-5 #Time step for the fluid resolution, in s
fluidResolPeriod = 1e-2 #Time between two fluid resolution, in s
#Configuration: inclined channel
slope = 0.05 #Inclination angle of the channel slope in radian
lengthCell = 10 #Streamwise length of the periodic cell, in diameter
widthCell = 10 #Spanwise length of the periodic cell, in diameter
Nlayer = 10. #Number of particle layers, in diameters
fluidHeight = (Nlayer+waterDepth)*diameterPart #Height of the flow from the bottom of the sample, in m
saveData = 1 #If put to 1, at each execution of function measure() save the sediment transport rate, fluid velocity, solid volume fraction and velocity profiles for post-processing
endTime = 100 #Time simulated (in seconds)
##
## Secondary parameters of the simulation
##
expoDrag_PY = 3.1 # Richardson Zaki exponent for the hindrance function of the drag force applied to the particles
#Discretization of the sample in ndimz wall-normal (z) steps of size dz, between the bottom of the channel and the position of the water free-surface. Should be equal to the length of the imposed fluid profile. Mesh used for HydroForceEngine.
ndimz = 900 #Number of cells in the height
dz = fluidHeight/(1.0*(ndimz-1)) # Fluid discretization step in the wall-normal direction
# Initialization of the main vectors
vxFluidPY = np.zeros(ndimz+1) # Vertical fluid velocity profile: u^f = u_x^f(z) e_x, with x the streamwise direction and z the wall-normal. Fluid velocity defined in between the mesh nodes and at the node at the two boundaries, i.e. at ndimz-1 + 2 location = ndimz+1.
phiPartPY = np.zeros(ndimz-1) # Vertical particle volume fraction profile, evaluated in between the cells, i.e. at ndimz-1 locations
vxPartPY = np.zeros(ndimz-1) # Vertical average particle velocity profile, evaluated in between the cells, i.e. at ndimz-1 locations
#Geometrical configuration, define useful quantities
height = 5*fluidHeight #height of the periodic cell, in m (bigger than the fluid height to account for particles jumping above the latter)
length = lengthCell*diameterPart #length of the stream, in m
width = widthCell*diameterPart #width of the stream, in m
groundPosition = height/4.0 #Definition of the position of the ground, in m
gravityVector = Vector3(9.81*sin(slope),0.0,-9.81*cos(slope)) #Gravity vector to consider a channel inclined with slope angle 'slope'
#Particles contact law/material parameters
maxPressure = (densPart-densFluidPY)*phiPartMax*Nlayer*diameterPart*abs(gravityVector[2]) #Estimated max particle pressure from the static load
normalStiffness = maxPressure*diameterPart*1e4 #Evaluate the minimal normal stiffness to be in the rigid particle limit (cf Roux and Combe 2002)
youngMod = normalStiffness/diameterPart #Young's modulus of the particles, derived from the desired stiffness.
poissonRatio = 0.5 #Poisson's ratio of the particles. Classical value; does not have much influence.
O.materials.append(ViscElMat(en=restitCoef, et=0., young=youngMod, poisson=poissonRatio, density=densPart, frictionAngle=partFrictAngle, label='Mat'))
########################
## FRAMEWORK CREATION ##
########################
#Definition of the semi-periodic cell
O.periodic = True
O.cell.setBox(length,width,height)
# Reference walls: build two planes at the ground and free-surface to have a reference for the eyes in the 3D view
lowPlane = box(center= (length/2.0, width/2.0,groundPosition),extents=(200,200,0),fixed=True,wire=False,color = (0.,1.,0.),material = 'Mat')
WaterSurface = box(center= (length/2.0, width/2.0,groundPosition+fluidHeight),extents=(2000,width/2.0,0),fixed=True,wire=False,color = (0,0,1),material = 'Mat',mask = 0)
O.bodies.append([lowPlane,WaterSurface]) #add to simulation
# Regular arrangement of spheres stuck at the bottom with random height
L = list(range(0,int(length/(diameterPart)))) #The length is divided into particle diameters
W = list(range(0,int(width/(diameterPart)))) #The width is divided into particle diameters
for x in L: #loop creating a set of spheres stuck at the bottom with a random altitude between -0.5 and 0.5 diameter around groundPosition.
for y in W:
n = rand.random() #Define a number between 0 and 1
O.bodies.append(sphere((x*diameterPart, y*diameterPart,groundPosition + (-0.5+n)*diameterPart),diameterPart/2.,color=(0,0,0),fixed = True,material = 'Mat'))
#Create a loose cloud of particle inside the cell
partCloud = pack.SpherePack()
partVolume = pi/6.*pow(diameterPart,3) #Volume of a particle
partNumber = int(Nlayer*phiPartMax*diameterPart*length*width/partVolume) #Volume of beads to obtain Nlayer layers of particles
partCloud.makeCloud(minCorner=(0,0.,groundPosition+diameterPart),maxCorner=(length,width,groundPosition+fluidHeight),rRelFuzz=0., rMean=diameterPart/2.0, num = partNumber)
partCloud.toSimulation(material='Mat') #Send this packing to simulation with material Mat
#Evaluate the deposition time considering the free-fall time of the highest particle to the ground
depoTime = sqrt(fluidHeight*2/abs(gravityVector[2]))
# Collect the ids of the spheres which are dynamic to add a fluid force through HydroForceEngines
idApplyForce = []
for b in O.bodies:
if isinstance(b.shape,Sphere) and b.dynamic:
idApplyForce+=[b.id]
#########################
#### SIMULATION LOOP#####
#########################
O.engines = [
# Reset the forces
ForceResetter(),
# Detect the potential contacts
InsertionSortCollider([Bo1_Sphere_Aabb(), Bo1_Wall_Aabb(),Bo1_Facet_Aabb(),Bo1_Box_Aabb()],label='contactDetection',allowBiggerThanPeriod = True),
# Calculate the different interactions
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(), Ig2_Box_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()]
,label = 'interactionLoop'),
#Apply an hydrodynamic force to the particles
HydroForceEngine(densFluid = densFluidPY,viscoDyn = kinematicViscoFluid*densFluidPY,zRef = groundPosition,gravity = gravityVector,deltaZ = dz,expoRZ = expoDrag_PY,lift = False,nCell = ndimz,vCell = length*width*dz,radiusPart=diameterPart/2.,vxFluid = np.array(vxFluidPY),phiPart = phiPartPY,vxPart = vxPartPY,ids = idApplyForce, label = 'hydroEngine', dead = True),
#Solve the fluid volume-averaged 1D momentum balance, RANS 1D
PyRunner(command = 'fluidModel()', virtPeriod = fluidResolPeriod, label = 'fluidRes', dead = True),
#Measurement, output files
PyRunner(command = 'measure()', virtPeriod = 0.1, label = 'measurement', dead = True),
# Check if the packing is stabilized, if yes activate the hydro force on the grains and the slope.
PyRunner(command='gravityDeposition(depoTime)',virtPeriod = 0.01,label = 'gravDepo'),
#GlobalStiffnessTimeStepper, determine the time step
GlobalStiffnessTimeStepper(defaultDt = 1e-4, viscEl = True,timestepSafetyCoefficient = 0.7, label = 'GSTS'),
# Integrate the equation and calculate the new position/velocities...
NewtonIntegrator(damping=0.2, gravity=gravityVector, label='newtonIntegr')
]
#save the initial configuration to be able to recharge the simulation starting configuration easily
O.saveTmp()
#run
O.run()
####################################################################################################################################
#################################################### FUNCTION DEFINITION #########################################################
####################################################################################################################################
###### ######
### LET THE TIME FOR THE GRAVITY DEPOSITION AND ACTIVATE THE FLUID AT THE END ###
###### ######
def gravityDeposition(lim):
if O.time<lim : return
else :
print('\n Gravity deposition finished, apply fluid forces !\n')
newtonIntegr.damping = 0.0 # Set the artificial numerical damping to zero
gravDepo.dead = True # Remove the present engine for the following
hydroEngine.dead = False # Activate the HydroForceEngine
hydroEngine.vxFluid = vxFluidPY # Send the fluid velocity vector used to apply the drag fluid force on particles in HydroForceEngine (see c++ code)
hydroEngine.ReynoldStresses = np.ones(ndimz)*1e-4 # Send the simplified fluid Reynolds stresses Rxz/\rho^f used to account for the fluid velocity fluctuations in HydroForceEngine (see c++ code)
hydroEngine.turbulentFluctuation() #Initialize the fluid velocity fluctuation associated to particles to zero in HydroForceEngine, necessary to avoid segmentation fault
measurement.dead = False # Activate the measure() PyRunner
fluidRes.dead = False # Activate the 1D fluid resolution
hydroEngine.averageProfile() #Evaluate the solid volume fraction, velocity and drag, necessary for the fluid resolution.
hydroEngine.fluidResolution(1.,dtFluid) #Initialize the fluid resolution, run the fluid resolution for 1s
return
###############
#########################################
####### ########
### FLUID RESOLUTION ###
####### ########
def fluidModel():
global vxFluidPY,taufsi
#Evaluate the average vx,vy,vz,phi,drag profiles and store it in hydroEngine, to prepare the fluid resolution
hydroEngine.averageProfile()
#Fluid resolution
hydroEngine.fluidResolution(fluidResolPeriod,dtFluid) #Solve the fluid momentum balance for a time of fluidResolPeriod s with a time step dtFluid
#update the fluid velocity for later save
vxFluidPY = np.array(hydroEngine.vxFluid)
####### ########
### OUTPUT ###
####### ########
#Initialization
qsMean = 0 #Mean dimensionless sediment transport rate
zAxis = np.zeros(ndimz) #z scale, in diameter
for i in range(0,ndimz):#z scale used for the possible plot at the end
zAxis[i] = i*dz/diameterPart
# Averaging/Save
def measure():
global qsMean,vxPartPY,phiPartPY
#Evaluate the average depth profile of streamwise, spanwise and wall-normal particle velocity, particle volume fraction (and drag force for coupling with RANS fluid resolution), and store it in hydroEngine variables vxPart, phiPart, vyPart, vzPart, averageDrag.
hydroEngine.averageProfile()
#Extract the calculated vector. They can be saved and plotted afterwards.
vxPartPY = np.array(hydroEngine.vxPart)
phiPartPY = np.array(hydroEngine.phiPart)
#Evaluate the dimensionless sediment transport rate for information
qsMean = sum(phiPartPY*vxPartPY)*dz/sqrt((densPart/densFluidPY - 1)*abs(gravityVector[2])*pow(diameterPart,3))
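    # sum(phiPartPY*vxPartPY)*dz is the depth-integrated volumetric flux per unit width [m^2/s],
    # made dimensionless by sqrt((rho_p/rho_f - 1)*g*d^3) (classical Einstein-type scaling).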
plot.addData(SedimentRate = qsMean, time = O.time) #Plot it during the simulation
#Condition to stop the simulation after endTime seconds
if O.time>=endTime:
print('\n End of the simulation, simulated {0}s as required !\n '.format(endTime))
O.pause()
#Evaluate the Shields number from the maximum of the Reynolds stresses evaluated in the fluid resolution
shieldsNumber = max(hydroEngine.ReynoldStresses)/((densPart-densFluidPY)*diameterPart*abs(gravityVector[2]))
print('Shields number', shieldsNumber)
if saveData==1: #Save data for postprocessing
global fileNumber
nameFile = scriptPath + '/data/'+ str(fileNumber)+'.py' # Name of the file that will be saved
globalParam = ['qsMean','phiPartPY','vxPartPY','vxFluidPY','zAxis'] # Variables to save
Save(nameFile, globalParam) #Save
fileNumber+=1 #Increment the file number
#Plot the dimensionless sediment transport rate as a function of time during the simulation
plot.plots={'time':('SedimentRate')}
plot.plot()
################
##########################################
#Save data details
fileNumber = 0 # Counter for the file saved
if saveData==1: #If saveData option is activated, requires a folder data
scriptPath = os.path.abspath(os.path.dirname(sys.argv[-1])) #Path where the script is stored
if os.path.exists(scriptPath +'/data/')==False:
os.mkdir(scriptPath +'/data/')
else:
        print('\n!! Save data: overwrite the files contained in the folder data/ !!\n')
#Function to save global variables in a python file which can be re-executed for post-processing
def Save(filePathName, globalVariables):
f = open(filePathName,'w')
f.write('from numpy import *\n')
for i in globalVariables:
f.write(i + ' = '+repr(globals()[i]) + '\n')
f.close()
| gpl-2.0 |
City-of-Bloomington/green-rental | allauth/socialaccount/providers/openid/migrations/0003_auto__del_openidaccount.py | 82 | 1958 | # encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'OpenIDAccount'
db.delete_table('openid_openidaccount')
def backwards(self, orm):
# Adding model 'OpenIDAccount'
db.create_table('openid_openidaccount', (
('socialaccount_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['socialaccount.SocialAccount'], unique=True, primary_key=True)),
('identity', self.gf('django.db.models.fields.URLField')(max_length=255, unique=True)),
))
db.send_create_signal('openid', ['OpenIDAccount'])
models = {
'openid.openidnonce': {
'Meta': {'object_name': 'OpenIDNonce'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {})
},
'openid.openidstore': {
'Meta': {'object_name': 'OpenIDStore'},
'assoc_type': ('django.db.models.fields.TextField', [], {}),
'handle': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('django.db.models.fields.IntegerField', [], {}),
'lifetime': ('django.db.models.fields.IntegerField', [], {}),
'secret': ('django.db.models.fields.TextField', [], {}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['openid']
| agpl-3.0 |
ltiao/scikit-learn | sklearn/decomposition/kernel_pca.py | 4 | 9144 | """Kernel Principal Components Analysis"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import KernelCenterer
from ..metrics.pairwise import pairwise_kernels
class KernelPCA(BaseEstimator, TransformerMixin):
"""Kernel Principal component analysis (KPCA)
Non-linear dimensionality reduction through the use of kernels (see
:ref:`metrics`).
Read more in the :ref:`User Guide <kernel_PCA>`.
Parameters
----------
n_components: int or None
Number of components. If None, all non-zero components are kept.
kernel: "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
Kernel.
Default: "linear"
degree : int, default=3
Degree for poly kernels. Ignored by other kernels.
gamma : float, optional
Kernel coefficient for rbf and poly kernels. Default: 1/n_features.
Ignored by other kernels.
coef0 : float, optional
Independent term in poly and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
    alpha: float
Hyperparameter of the ridge regression that learns the
inverse transform (when fit_inverse_transform=True).
Default: 1.0
fit_inverse_transform: bool
Learn the inverse transform for non-precomputed kernels.
(i.e. learn to find the pre-image of a point)
Default: False
eigen_solver: string ['auto'|'dense'|'arpack']
Select eigensolver to use. If n_components is much less than
the number of training samples, arpack may be more efficient
than the dense eigensolver.
tol: float
convergence tolerance for arpack.
Default: 0 (optimal value will be chosen by arpack)
max_iter : int
maximum number of iterations for arpack
Default: None (optimal value will be chosen by arpack)
remove_zero_eig : boolean, default=True
If True, then all components with zero eigenvalues are removed, so
that the number of components in the output may be < n_components
(and sometimes even zero due to numerical instability).
When n_components is None, this parameter is ignored and components
with zero eigenvalues are removed regardless.
Attributes
----------
lambdas_ :
Eigenvalues of the centered kernel matrix
alphas_ :
Eigenvectors of the centered kernel matrix
dual_coef_ :
Inverse transform matrix
X_transformed_fit_ :
Projection of the fitted data on the kernel principal components
References
----------
Kernel PCA was introduced in:
Bernhard Schoelkopf, Alexander J. Smola,
and Klaus-Robert Mueller. 1999. Kernel principal
component analysis. In Advances in kernel methods,
MIT Press, Cambridge, MA, USA 327-352.
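
    Examples
    --------
    A minimal illustrative sketch (parameter values are arbitrary), projecting
    concentric circles with an RBF kernel:

    >>> from sklearn.datasets import make_circles
    >>> from sklearn.decomposition import KernelPCA
    >>> X, _ = make_circles(n_samples=100, random_state=0)
    >>> X_kpca = KernelPCA(n_components=2, kernel="rbf", gamma=10).fit_transform(X)
    >>> X_kpca.shape
    (100, 2)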
"""
def __init__(self, n_components=None, kernel="linear",
gamma=None, degree=3, coef0=1, kernel_params=None,
alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
tol=0, max_iter=None, remove_zero_eig=False):
if fit_inverse_transform and kernel == 'precomputed':
raise ValueError(
"Cannot fit_inverse_transform with a precomputed kernel.")
self.n_components = n_components
self.kernel = kernel
self.kernel_params = kernel_params
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.alpha = alpha
self.fit_inverse_transform = fit_inverse_transform
self.eigen_solver = eigen_solver
self.remove_zero_eig = remove_zero_eig
self.tol = tol
self.max_iter = max_iter
self._centerer = KernelCenterer()
@property
def _pairwise(self):
return self.kernel == "precomputed"
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
def _fit_transform(self, K):
""" Fit's using kernel K"""
# center kernel
K = self._centerer.fit_transform(K)
if self.n_components is None:
n_components = K.shape[0]
else:
n_components = min(K.shape[0], self.n_components)
# compute eigenvectors
if self.eigen_solver == 'auto':
if K.shape[0] > 200 and n_components < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
else:
eigen_solver = self.eigen_solver
if eigen_solver == 'dense':
self.lambdas_, self.alphas_ = linalg.eigh(
K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
elif eigen_solver == 'arpack':
self.lambdas_, self.alphas_ = eigsh(K, n_components,
which="LA",
tol=self.tol,
maxiter=self.max_iter)
# sort eigenvectors in descending order
indices = self.lambdas_.argsort()[::-1]
self.lambdas_ = self.lambdas_[indices]
self.alphas_ = self.alphas_[:, indices]
# remove eigenvectors with a zero eigenvalue
if self.remove_zero_eig or self.n_components is None:
self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
self.lambdas_ = self.lambdas_[self.lambdas_ > 0]
return K
def _fit_inverse_transform(self, X_transformed, X):
if hasattr(X, "tocsr"):
raise NotImplementedError("Inverse transform not implemented for "
"sparse matrices!")
n_samples = X_transformed.shape[0]
K = self._get_kernel(X_transformed)
K.flat[::n_samples + 1] += self.alpha
self.dual_coef_ = linalg.solve(K, X, sym_pos=True, overwrite_a=True)
self.X_transformed_fit_ = X_transformed
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
K = self._get_kernel(X)
self._fit_transform(K)
if self.fit_inverse_transform:
sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
X_transformed = np.dot(self.alphas_, sqrt_lambdas)
self._fit_inverse_transform(X_transformed, X)
self.X_fit_ = X
return self
def fit_transform(self, X, y=None, **params):
"""Fit the model from data in X and transform X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self.fit(X, **params)
X_transformed = self.alphas_ * np.sqrt(self.lambdas_)
if self.fit_inverse_transform:
self._fit_inverse_transform(X_transformed, X)
return X_transformed
def transform(self, X):
"""Transform X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'X_fit_')
K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
return np.dot(K, self.alphas_ / np.sqrt(self.lambdas_))
def inverse_transform(self, X):
"""Transform X back to original space.
Parameters
----------
X: array-like, shape (n_samples, n_components)
Returns
-------
X_new: array-like, shape (n_samples, n_features)
References
----------
"Learning to Find Pre-Images", G BakIr et al, 2004.
"""
if not self.fit_inverse_transform:
raise NotFittedError("The fit_inverse_transform parameter was not"
" set to True when instantiating and hence "
"the inverse transform is not available.")
K = self._get_kernel(X, self.X_transformed_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
rotofly/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
adedayo/intellij-community | python/helpers/pydev/pydevd_save_locals.py | 53 | 1523 | """
Utility for saving locals.
"""
import sys
import pydevd_vars
def is_save_locals_available():
try:
if '__pypy__' in sys.builtin_module_names:
import __pypy__
save_locals = __pypy__.locals_to_fast
return True
except:
pass
try:
import ctypes
except:
return False #Not all Python versions have it
try:
func = ctypes.pythonapi.PyFrame_LocalsToFast
except:
return False
return True
def save_locals(frame):
"""
    Copy values from frame.f_locals into the fast stack slots in the given frame.
Note: the 'save_locals' branch had a different approach wrapping the frame (much more code, but it gives ideas
on how to save things partially, not the 'whole' locals).
"""
if not isinstance(frame, pydevd_vars.frame_type):
# Fix exception when changing Django variable (receiving DjangoTemplateFrame)
return
try:
if '__pypy__' in sys.builtin_module_names:
import __pypy__
save_locals = __pypy__.locals_to_fast
save_locals(frame)
return
except:
pass
try:
import ctypes
except:
return #Not all Python versions have it
try:
func = ctypes.pythonapi.PyFrame_LocalsToFast
except:
return
#parameter 0: don't set to null things that are not in the frame.f_locals (which seems good in the debugger context).
func(ctypes.py_object(frame), ctypes.c_int(0))
| apache-2.0 |
liorvh/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/_vendor/html5lib/inputstream.py | 435 | 31665 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from pip._vendor.six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]"
if utils.supports_lone_surrogates:
# Use one extra step of indirection and create surrogates with
# unichr. Not using this indirection would introduce an illegal
# unicode literal on platforms not supporting such lone
# surrogates.
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate +
eval('"\\uD800-\\uDFFF"'))
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if isinstance(source, http_client.HTTPResponse):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
if encoding is not None:
raise TypeError("Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
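# Editor's sketch of the dispatch above: text input yields a unicode stream,
# bytes input yields a binary stream that still has to sniff its encoding.
#
#   HTMLInputStream("<p>text</p>")                      # HTMLUnicodeInputStream
#   HTMLInputStream(b"<p>bytes</p>")                    # HTMLBinaryInputStream
#   HTMLInputStream(b"<p>bytes</p>", encoding="utf-8")  # explicit transport encoding
#   # HTMLInputStream("<p>text</p>", encoding="utf-8") raises TypeError: the
#   # encoding of an already-decoded string cannot be overridden.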
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
if not utils.supports_lone_surrogates:
# Such platforms will have already checked for such
# surrogate errors, so no need to do this checking.
self.reportCharacterErrors = None
self.replaceCharactersRegexp = None
elif len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(eval('"[\\uD800-\\uDFFF]"'))
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile(
eval('"([\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(?<![\\uD800-\\uDBFF])[\\uDC00-\\uDFFF])"'))
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
if self.reportCharacterErrors:
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
            if skip:
                # this match is the low surrogate of the pair handled on the
                # previous iteration; skip it once, then resume checking
                skip = False
                continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
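# Editor's sketch of the reading API above, i.e. the calls the tokenizer
# relies on (char, charsUntil, unget, position):
#
#   stream = HTMLUnicodeInputStream("<p>hi\nthere</p>")
#   stream.char()           # '<'
#   stream.charsUntil(">")  # 'p' - everything up to but excluding '>'
#   stream.unget('p')       # push one character back onto the chunk
#   stream.position()       # (1, 1) - 1-based line, 0-based column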
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), "certain")
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 512
        # Number of bytes to use when detecting the encoding using chardet
self.numBytesChardet = 100
# Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
# Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
# Call superclass
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except:
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
# First look for a BOM
# This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
# If there is no BOM need to look for meta elements with encoding
# information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
        # Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
try:
from charade.universaldetector import UniversalDetector
except ImportError:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence = "tentative"
encoding = self.defaultEncoding
# Substitute for equivalent encodings:
encodingSub = {"iso-8859-1": "windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
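# Editor's sketch of the precedence implemented by detectEncoding() above:
# a BOM beats <meta>, <meta> beats chardet, and chardet beats the
# windows-1252 default.
#
#   s = HTMLBinaryInputStream(codecs.BOM_UTF8 + b"<p>hi</p>")
#   s.charEncoding  # ('utf-8', 'certain') - the BOM wins
#   s = HTMLBinaryInputStream(b"<meta charset=utf-8><p>hi</p>")
#   s.charEncoding  # ('utf-8', 'tentative') - found by the meta pre-parser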
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
    def __new__(cls, value):
        assert isinstance(value, bytes)
        return bytes.__new__(cls, value.lower())
def __init__(self, value):
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
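# Editor's sketch: EncodingBytes behaves like a lowercased bytes object with
# a cursor that starts just before the first byte.
#
#   eb = EncodingBytes(b"<META Charset=UTF-8>")
#   next(eb)                 # b'<'
#   eb.matchBytes(b"<meta")  # True - the input was lowercased by __new__
#   eb.currentByte           # b' ' - the position advanced past the match
#   eb.skip()                # b'c' - skip spaces, land on the next attribute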
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
            # <meta is not followed by a space, so just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
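# Editor's sketch of the pre-parser above; both the charset attribute and the
# http-equiv/content pragma form of <meta> are recognised:
#
#   EncodingParser(b'<meta charset="utf-8">').getEncoding()
#   # -> 'utf-8'
#   EncodingParser(b'<meta http-equiv="content-type" '
#                  b'content="text/html; charset=utf-8">').getEncoding()
#   # -> 'utf-8'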
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
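# Editor's sketch: codecName() canonicalises encoding labels by stripping
# ASCII punctuation/whitespace and lowercasing before the table lookup, e.g.
#
#   codecName("UTF-8")        # 'utf-8'
#   codecName(b"Utf 8")       # 'utf-8' - byte labels are decoded as ascii first
#   codecName("no-such-enc")  # None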
| gpl-3.0 |
glovebx/odoo | openerp/addons/base/ir/ir_cron.py | 24 | 15115 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import threading
import time
import psycopg2
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
import openerp
from openerp import SUPERUSER_ID, netsvc, api
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.modules import load_information_from_description_file
_logger = logging.getLogger(__name__)
BASE_VERSION = load_information_from_description_file('base')['version']
def str2tuple(s):
return eval('tuple(%s)' % (s or ''))
_intervalTypes = {
'work_days': lambda interval: relativedelta(days=interval),
'days': lambda interval: relativedelta(days=interval),
'hours': lambda interval: relativedelta(hours=interval),
'weeks': lambda interval: relativedelta(days=7*interval),
'months': lambda interval: relativedelta(months=interval),
'minutes': lambda interval: relativedelta(minutes=interval),
}
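# Editor's sketch (not in the original file) of how the two helpers above
# cooperate when _process_job() below reschedules a job, e.g. a job stored
# with args "(1, 'a')" that runs every 2 hours:
#
#   str2tuple("(1, 'a')")                  # (1, 'a')
#   nextcall = datetime(2015, 1, 1, 12, 0)
#   nextcall + _intervalTypes['hours'](2)  # datetime(2015, 1, 1, 14, 0)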
class ir_cron(osv.osv):
""" Model describing cron jobs (also called actions or tasks).
"""
# TODO: perhaps in the future we could consider a flag on ir.cron jobs
# that would cause database wake-up even if the database has not been
# loaded yet or was already unloaded (e.g. 'force_db_wakeup' or something)
# See also openerp.cron
_name = "ir.cron"
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
'user_id': fields.many2one('res.users', 'User', required=True),
'active': fields.boolean('Active'),
'interval_number': fields.integer('Interval Number',help="Repeat every x."),
        'interval_type': fields.selection([('minutes', 'Minutes'),
            ('hours', 'Hours'), ('work_days', 'Work Days'), ('days', 'Days'),
            ('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
'numbercall': fields.integer('Number of Calls', help='How many times the method is called,\na negative number indicates no limit.'),
'doall' : fields.boolean('Repeat Missed', help="Specify if missed occurrences should be executed when the server restarts."),
'nextcall' : fields.datetime('Next Execution Date', required=True, help="Next planned execution date for this job."),
'model': fields.char('Object', help="Model name on which the method to be called is located, e.g. 'res.partner'."),
'function': fields.char('Method', help="Name of the method to be called when this job is processed."),
'args': fields.text('Arguments', help="Arguments to be passed to the method, e.g. (uid,)."),
'priority': fields.integer('Priority', help='The priority of the job, as an integer: 0 means higher priority, 10 means lower priority.')
}
_defaults = {
'nextcall' : lambda *a: time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'priority' : 5,
'user_id' : lambda obj,cr,uid,context: uid,
'interval_number' : 1,
'interval_type' : 'months',
'numbercall' : 1,
'active' : 1,
}
def _check_args(self, cr, uid, ids, context=None):
try:
for this in self.browse(cr, uid, ids, context):
str2tuple(this.args)
except Exception:
return False
return True
_constraints = [
(_check_args, 'Invalid arguments', ['args']),
]
def _handle_callback_exception(self, cr, uid, model_name, method_name, args, job_id, job_exception):
""" Method called when an exception is raised by a job.
        Simply logs the exception and rolls back the transaction.
:param model_name: model name on which the job method is located.
:param method_name: name of the method to call when this job is processed.
:param args: arguments of the method (without the usual self, cr, uid).
:param job_id: job id.
:param job_exception: exception raised by the job.
"""
cr.rollback()
def _callback(self, cr, uid, model_name, method_name, args, job_id):
""" Run the method associated to a given job
It takes care of logging and exception handling.
:param model_name: model name on which the job method is located.
:param method_name: name of the method to call when this job is processed.
:param args: arguments of the method (without the usual self, cr, uid).
:param job_id: job id.
"""
try:
args = str2tuple(args)
openerp.modules.registry.RegistryManager.check_registry_signaling(cr.dbname)
registry = openerp.registry(cr.dbname)
if model_name in registry:
model = registry[model_name]
if hasattr(model, method_name):
log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (cr.dbname,uid,'*',model_name,method_name)+tuple(args), depth=log_depth)
if _logger.isEnabledFor(logging.DEBUG):
start_time = time.time()
getattr(model, method_name)(cr, uid, *args)
if _logger.isEnabledFor(logging.DEBUG):
end_time = time.time()
_logger.debug('%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name))
openerp.modules.registry.RegistryManager.signal_caches_change(cr.dbname)
else:
msg = "Method `%s.%s` does not exist." % (model_name, method_name)
_logger.warning(msg)
else:
msg = "Model `%s` does not exist." % model_name
_logger.warning(msg)
except Exception, e:
_logger.exception("Call of self.pool.get('%s').%s(cr, uid, *%r) failed in Job %s" % (model_name, method_name, args, job_id))
self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e)
def _process_job(self, job_cr, job, cron_cr):
""" Run a given job taking care of the repetition.
:param job_cr: cursor to use to execute the job, safe to commit/rollback
:param job: job to be run (as a dictionary).
:param cron_cr: cursor holding lock on the cron job row, to use to update the next exec date,
must not be committed/rolled back!
"""
try:
with api.Environment.manage():
now = fields.datetime.context_timestamp(job_cr, job['user_id'], datetime.now())
nextcall = fields.datetime.context_timestamp(job_cr, job['user_id'], datetime.strptime(job['nextcall'], DEFAULT_SERVER_DATETIME_FORMAT))
numbercall = job['numbercall']
ok = False
while nextcall < now and numbercall:
if numbercall > 0:
numbercall -= 1
if not ok or job['doall']:
self._callback(job_cr, job['user_id'], job['model'], job['function'], job['args'], job['id'])
if numbercall:
nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
ok = True
addsql = ''
if not numbercall:
addsql = ', active=False'
cron_cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s"+addsql+" WHERE id=%s",
(nextcall.astimezone(pytz.UTC).strftime(DEFAULT_SERVER_DATETIME_FORMAT), numbercall, job['id']))
self.invalidate_cache(job_cr, SUPERUSER_ID)
finally:
job_cr.commit()
cron_cr.commit()
@classmethod
def _acquire_job(cls, db_name):
# TODO remove 'check' argument from addons/base_action_rule/base_action_rule.py
""" Try to process one cron job.
        This selects from the database all the jobs that should be processed. It then
        tries to lock each of them and, if it succeeds, runs the cron job (if it
        doesn't succeed, it means the job was already locked to be taken care
        of by another thread) and returns.
If a job was processed, returns True, otherwise returns False.
"""
db = openerp.sql_db.db_connect(db_name)
threading.current_thread().dbname = db_name
jobs = []
try:
with db.cursor() as cr:
# Make sure the database we poll has the same version as the code of base
cr.execute("SELECT 1 FROM ir_module_module WHERE name=%s AND latest_version=%s", ('base', BASE_VERSION))
if cr.fetchone():
# Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
cr.execute("""SELECT * FROM ir_cron
WHERE numbercall != 0
AND active AND nextcall <= (now() at time zone 'UTC')
ORDER BY priority""")
jobs = cr.dictfetchall()
else:
_logger.warning('Skipping database %s as its base version is not %s.', db_name, BASE_VERSION)
except psycopg2.ProgrammingError, e:
if e.pgcode == '42P01':
                # Class 42: Syntax Error or Access Rule Violation; 42P01: undefined_table
# The table ir_cron does not exist; this is probably not an OpenERP database.
_logger.warning('Tried to poll an undefined table on database %s.', db_name)
else:
raise
except Exception:
_logger.warning('Exception in cron:', exc_info=True)
for job in jobs:
lock_cr = db.cursor()
try:
# Try to grab an exclusive lock on the job row from within the task transaction
                # Restrict to the same conditions as for the search since the job may have already
                # been run by another thread when the cron runs in multi-thread mode
lock_cr.execute("""SELECT *
FROM ir_cron
WHERE numbercall != 0
AND active
AND nextcall <= (now() at time zone 'UTC')
AND id=%s
FOR UPDATE NOWAIT""",
(job['id'],), log_exceptions=False)
locked_job = lock_cr.fetchone()
if not locked_job:
_logger.debug("Job `%s` already executed by another process/thread. skipping it", job['name'])
continue
# Got the lock on the job row, run its code
_logger.debug('Starting job `%s`.', job['name'])
job_cr = db.cursor()
try:
registry = openerp.registry(db_name)
registry[cls._name]._process_job(job_cr, job, lock_cr)
except Exception:
_logger.exception('Unexpected exception while processing cron job %r', job)
finally:
job_cr.close()
except psycopg2.OperationalError, e:
if e.pgcode == '55P03':
# Class 55: Object not in prerequisite state; 55P03: lock_not_available
_logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name'])
continue
else:
# Unexpected OperationalError
raise
            finally:
                # always close the lock cursor, including when we're exiting
                # due to an exception while acquiring the lock
                lock_cr.close()
if hasattr(threading.current_thread(), 'dbname'): # cron job could have removed it as side-effect
del threading.current_thread().dbname
def _try_lock(self, cr, uid, ids, context=None):
"""Try to grab a dummy exclusive write-lock to the rows with the given ids,
to make sure a following write() or unlink() will not block due
to a process currently executing those cron tasks"""
try:
cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table,
(tuple(ids),), log_exceptions=False)
except psycopg2.OperationalError:
cr.rollback() # early rollback to allow translations to work for the user feedback
            raise osv.except_osv(_("Record cannot be modified right now"),
                                 _("This cron task is currently being executed and may not be modified; "
                                   "please try again in a few minutes"))
def create(self, cr, uid, vals, context=None):
res = super(ir_cron, self).create(cr, uid, vals, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).unlink(cr, uid, ids, context=context)
return res
def try_write(self, cr, uid, ids, values, context=None):
try:
with cr.savepoint():
cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table,
(tuple(ids),), log_exceptions=False)
except psycopg2.OperationalError:
pass
else:
return super(ir_cron, self).write(cr, uid, ids, values, context=context)
return False
def toggle(self, cr, uid, ids, model, domain, context=None):
active = bool(self.pool[model].search_count(cr, uid, domain, context=context))
return self.try_write(cr, uid, ids, {'active': active}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
frascoweb/frasco | frasco/users/__init__.py | 1 | 10598 | from frasco.ext import *
from frasco.i18n import lazy_translate
from frasco.utils import populate_obj, extract_unmatched_items
from flask import render_template
from flask_login import LoginManager, logout_user, login_required, login_url, login_fresh, confirm_login, fresh_login_required, user_logged_in
import datetime
import os
from .user import *
from .model import *
from .jinja_ext import *
from .forms import *
from .tokens import *
from .tokens import TOKEN_NS_ACCESS_TOKEN
from .signals import *
from .password import *
from .blueprint import users_blueprint
from . import captcha
class FrascoUsersState(ExtensionState):
def __init__(self, *args, **kwargs):
super(FrascoUsersState, self).__init__(*args, **kwargs)
self.manager = LoginManager()
self.user_validators = []
self.override_builtin_user_validation = False
self.login_validators = []
self.password_validators = []
self.captcha_validator = None
self.user_request_loaders = []
def user_request_loader(self, func):
self.user_request_loaders.append(func)
return func
class FrascoUsers(Extension):
name = "frasco_users"
state_class = FrascoUsersState
defaults = {
# email
"must_provide_email": True,
"email_is_unique": True,
"email_allowed_domains": None,
# username
"must_provide_username": True,
"username_is_unique": True,
"forbidden_usernames": [],
"min_username_length": 1,
"allow_spaces_in_username": False,
"username_case_sensitive": False,
# password
"validate_password_regexps": None,
"prevent_password_reuse": False,
"max_password_reuse_saved": None,
"min_time_between_password_change": None,
"expire_password_after": None,
"max_password_length": 500, # used to prevent DOS attacks
# login
"allow_login": True,
"enable_2fa": False,
"login_view": "users.login",
"login_redirect": None, # redirect to url instead of login page
"login_form_class": LoginWithEmailForm,
"login_2fa_form_class": Login2FAForm,
"allow_email_or_username_login": True,
"remember_days": 365,
"redirect_after_login": "index",
"redirect_after_login_disallowed": None,
"2fa_issuer_name": None, # default is app.config['TITLE']
"2fa_remember_days": 60,
"2fa_remember_cookie_options": {},
# signup
"signup_redirect": None, # redirect to url instead of signup page
"allow_signup": True,
"signup_form_class": SignupForm,
"send_welcome_email": False,
"login_user_on_signup": True,
"rate_limit_count": None,
"rate_limit_period": 60,
"redirect_after_signup": "index",
"redirect_after_signup_disallowed": None, # go to login
# captcha
"debug_captcha": False,
"recaptcha_key": None,
"recaptcha_secret": None,
"hcaptcha_key": None,
"hcaptcha_secret": None,
# reset password
"reset_password_redirect": None, # redirect to url instead of reset password page
"allow_reset_password": True,
"send_reset_password_form_class": SendResetPasswordForm,
"reset_password_form_class": ResetPasswordForm,
"send_reset_password_email": True,
"reset_password_ttl": 3600, # 1h
"login_user_on_reset_password": True,
"redirect_after_reset_password_token": False,
"redirect_after_reset_password": "index",
"redirect_after_reset_password_disallowed": "users.login",
# email validation
"redirect_after_email_validated": "index",
"email_validation_ttl": None,
"block_non_email_validated_users": False,
"send_email_validation_email": False,
# logout
"redirect_after_logout": "index",
# oauth
"oauth_signup_only": False,
"oauth_login_only": False,
"oauth_must_signup": False,
"oauth_must_provide_password": False,
# auth
"disable_password_authentication": False,
"default_auth_provider_name": "app",
# access tokens
"enable_access_tokens": False,
"access_tokens_ttl": None,
"enable_access_tokens_web_flow": True,
"access_tokens_web_flow_allowed_redirects": [],
# messages
"login_error_message": lazy_translate("Invalid email or password"),
"login_disallowed_message": None,
"login_2fa_error_message": lazy_translate("Invalid two factor authentification code"),
"login_required_message": lazy_translate("Please log in to access this page"),
"fresh_login_required_message": lazy_translate("Please reauthenticate to access this page"),
"password_expired_message": lazy_translate("Your password has expired, please enter a new one"),
"must_provide_username_message": lazy_translate("A username must be provided"),
"password_reused_message": lazy_translate("You cannot use a password which you have previously used"),
"min_time_between_password_change_message": lazy_translate("You have changed your password too recently"),
"validate_password_regexps_message": lazy_translate("The password does not respect the following rule: {rule}"),
"max_password_length_message": lazy_translate("The password is too long"),
"must_provide_email_message": lazy_translate("An email address must be provided"),
"signup_disallowed_message": None,
"username_taken_message": lazy_translate("An account using the same username already exists"),
"email_taken_message": lazy_translate("An account using the same email already exists"),
"username_too_short_message": lazy_translate("The username is too short"),
"username_has_spaces_message": lazy_translate("The username cannot contain spaces"),
"password_confirm_failed_message": lazy_translate("The two passwords do not match"),
"bad_signup_code_message": lazy_translate("The provided code is not valid"),
"rate_limit_reached_message": lazy_translate("Too many accounts have been created from this location in a too short period. Please, try again later"),
"reset_password_token_error_message": lazy_translate("This email does not exist in our database"),
"reset_password_token_success_message": lazy_translate("An email has been sent to your email address with a link to reset your password"),
"reset_password_error_message": lazy_translate("Invalid or expired link to reset your password"),
"reset_password_success_message": lazy_translate("Password successfully resetted"),
"reset_password_disallowed_message": lazy_translate("You are not allowed to reset your password"),
"update_password_error_message": lazy_translate("Invalid current password"),
"update_user_email_error_message": lazy_translate("An account using the same email already exists"),
"oauth_user_denied_login": lazy_translate("Login was denied"),
"oauth_user_already_exists_message": lazy_translate("This {provider} account has already been used on a different account"),
"oauth_error": lazy_translate("An error occured while authentifying you with the remote provider"),
"captcha_fail_message": lazy_translate("The captcha validation has failed"),
"email_validation_success_message": None,
"enable_admin": True
}
def _init_app(self, app, state):
state.Model = state.import_option('model')
state.LoginModel = state.import_option('login_model', required=False)
app.config.setdefault("REMEMBER_COOKIE_DURATION", datetime.timedelta(days=state.options["remember_days"]))
app.register_blueprint(users_blueprint)
app.jinja_env.add_extension(LoginRequiredExtension)
app.jinja_env.add_extension(AnonymousOnlyExtension)
state.manager.init_app(app)
state.manager.login_view = state.options['login_view']
state.manager.login_message_category = "error"
populate_obj(state.manager, extract_unmatched_items(state.options, self.defaults))
if state.options['recaptcha_key']:
state.captcha_validator = captcha.validate_recaptcha
elif state.options['hcaptcha_key']:
state.captcha_validator = captcha.validate_hcaptcha
if has_extension("frasco_mail", app):
app.extensions.frasco_mail.add_templates_from_package(__name__)
if has_extension("frasco_babel", app):
app.extensions.frasco_babel.add_extract_dir(os.path.dirname(__file__), ["templates"])
@state.manager.user_loader
def user_loader(id):
return state.Model.query.filter(getattr(state.Model, getattr(state.Model, '__session_cookie_identifier__', 'id')) == id).first()
@state.manager.request_loader
def request_loaders(request):
for loader in state.user_request_loaders:
user = loader(request)
if user:
return user
if state.options['enable_access_tokens']:
@state.user_request_loader
def access_token_user_loader(request):
access_token = request.args.get('access_token')
if 'Authorization' in request.headers:
authz = request.headers['Authorization']
if authz.startswith('Bearer '):
access_token = authz[7:]
if access_token:
return read_user_token(access_token, TOKEN_NS_ACCESS_TOKEN, state.options['access_tokens_ttl'])
if state.options['block_non_email_validated_users']:
@app.before_request
def block_non_email_validated_users():
if current_user.is_authenticated and not current_user.email_validated and \
request.endpoint not in ('users.logout', 'users.send_email_validation_email', 'users.validate_email'):
return render_template("users/non_email_validated_users_block_page.html")
@ext_stateful_method
def user_validator(self, state, func):
state.user_validators.append(func)
return func
@ext_stateful_method
def login_validator(self, state, func):
state.login_validators.append(func)
return func
@ext_stateful_method
def password_validator(self, state, func):
state.password_validators.append(func)
return func
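# Editor's sketch (hypothetical application code, not part of this module):
# the decorators above let an application register hooks on the extension
# instance; the validator signature shown is an assumption.
#
#   users_ext = FrascoUsers(app)
#
#   @users_ext.password_validator
#   def no_username_in_password(user, password):
#       return user.username not in password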
| mit |
bootandy/sqlalchemy | test/orm/test_update_delete.py | 22 | 32553 | from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy import Integer, String, ForeignKey, or_, exc, \
select, func, Boolean, case, text, column
from sqlalchemy.orm import mapper, relationship, backref, Session, \
joinedload, synonym, query
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
class UpdateDeleteTest(fixtures.MappedTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(32)),
Column('age_int', Integer))
Table(
"addresses", metadata,
Column('id', Integer, primary_key=True),
Column('user_id', ForeignKey('users.id'))
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
@classmethod
def insert_data(cls):
users = cls.tables.users
users.insert().execute([
dict(id=1, name='john', age_int=25),
dict(id=2, name='jack', age_int=47),
dict(id=3, name='jill', age_int=29),
dict(id=4, name='jane', age_int=37),
])
@classmethod
def setup_mappers(cls):
User = cls.classes.User
users = cls.tables.users
Address = cls.classes.Address
addresses = cls.tables.addresses
mapper(User, users, properties={
'age': users.c.age_int,
'addresses': relationship(Address)
})
mapper(Address, addresses)
def test_illegal_eval(self):
User = self.classes.User
s = Session()
assert_raises_message(
exc.ArgumentError,
"Valid strategies for session synchronization "
"are 'evaluate', 'fetch', False",
s.query(User).update,
{},
synchronize_session="fake"
)
def test_illegal_operations(self):
User = self.classes.User
Address = self.classes.Address
s = Session()
for q, mname in (
(s.query(User).limit(2), r"limit\(\)"),
(s.query(User).offset(2), r"offset\(\)"),
(s.query(User).limit(2).offset(2), r"limit\(\)"),
(s.query(User).order_by(User.id), r"order_by\(\)"),
(s.query(User).group_by(User.id), r"group_by\(\)"),
(s.query(User).distinct(), r"distinct\(\)"),
(s.query(User).join(User.addresses),
r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)"),
(s.query(User).outerjoin(User.addresses),
r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)"),
(s.query(User).select_from(Address),
r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)"),
(s.query(User).from_self(),
r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)"),
):
assert_raises_message(
exc.InvalidRequestError,
r"Can't call Query.update\(\) or Query.delete\(\) when "
"%s has been called" % mname,
q.update,
{'name': 'ed'})
assert_raises_message(
exc.InvalidRequestError,
r"Can't call Query.update\(\) or Query.delete\(\) when "
"%s has been called" % mname,
q.delete)
def test_evaluate_clauseelement(self):
User = self.classes.User
class Thing(object):
def __clause_element__(self):
return User.name.__clause_element__()
s = Session()
jill = s.query(User).get(3)
s.query(User).update(
{Thing(): 'moonbeam'},
synchronize_session='evaluate')
eq_(jill.name, 'moonbeam')
def test_evaluate_invalid(self):
User = self.classes.User
class Thing(object):
def __clause_element__(self):
return 5
s = Session()
assert_raises_message(
exc.InvalidRequestError,
"Invalid expression type: 5",
s.query(User).update, {Thing(): 'moonbeam'},
synchronize_session='evaluate'
)
def test_evaluate_unmapped_col(self):
User = self.classes.User
s = Session()
jill = s.query(User).get(3)
s.query(User).update(
{column('name'): 'moonbeam'},
synchronize_session='evaluate')
eq_(jill.name, 'jill')
s.expire(jill)
eq_(jill.name, 'moonbeam')
def test_evaluate_synonym_string(self):
class Foo(object):
pass
mapper(Foo, self.tables.users, properties={
'uname': synonym("name", )
})
s = Session()
jill = s.query(Foo).get(3)
s.query(Foo).update(
{'uname': 'moonbeam'},
synchronize_session='evaluate')
eq_(jill.uname, 'moonbeam')
def test_evaluate_synonym_attr(self):
class Foo(object):
pass
mapper(Foo, self.tables.users, properties={
'uname': synonym("name", )
})
s = Session()
jill = s.query(Foo).get(3)
s.query(Foo).update(
{Foo.uname: 'moonbeam'},
synchronize_session='evaluate')
eq_(jill.uname, 'moonbeam')
def test_evaluate_double_synonym_attr(self):
class Foo(object):
pass
mapper(Foo, self.tables.users, properties={
'uname': synonym("name"),
'ufoo': synonym('uname')
})
s = Session()
jill = s.query(Foo).get(3)
s.query(Foo).update(
{Foo.ufoo: 'moonbeam'},
synchronize_session='evaluate')
eq_(jill.ufoo, 'moonbeam')
def test_evaluate_hybrid_attr(self):
from sqlalchemy.ext.hybrid import hybrid_property
class Foo(object):
@hybrid_property
def uname(self):
return self.name
mapper(Foo, self.tables.users)
s = Session()
jill = s.query(Foo).get(3)
s.query(Foo).update(
{Foo.uname: 'moonbeam'},
synchronize_session='evaluate')
eq_(jill.uname, 'moonbeam')
def test_delete(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(
or_(User.name == 'john', User.name == 'jill')).delete()
assert john not in sess and jill not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack, jane])
def test_delete_against_metadata(self):
User = self.classes.User
users = self.tables.users
sess = Session()
sess.query(users).delete(synchronize_session=False)
eq_(sess.query(User).count(), 0)
def test_delete_with_bindparams(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(text('name = :name')).params(
name='john').delete('fetch')
assert john not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack, jill, jane])
def test_delete_rollback(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(
or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session='evaluate')
assert john not in sess and jill not in sess
sess.rollback()
assert john in sess and jill in sess
def test_delete_rollback_with_fetch(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(
or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session='fetch')
assert john not in sess and jill not in sess
sess.rollback()
assert john in sess and jill in sess
def test_delete_without_session_sync(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(
or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session=False)
assert john in sess and jill in sess
eq_(sess.query(User).order_by(User.id).all(), [jack, jane])
def test_delete_with_fetch_strategy(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(
or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session='fetch')
assert john not in sess and jill not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack, jane])
@testing.fails_on('mysql', 'FIXME: unknown')
def test_delete_invalid_evaluation(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
assert_raises(exc.InvalidRequestError,
sess.query(User).
filter(
User.name == select([func.max(User.name)])).delete,
synchronize_session='evaluate'
)
sess.query(User).filter(User.name == select([func.max(User.name)])).\
delete(synchronize_session='fetch')
assert john not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack, jill, jane])
def test_update(self):
User, users = self.classes.User, self.tables.users
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([25, 37, 29, 27])))
sess.query(User).filter(User.age > 29).\
update({User.age: User.age - 10}, synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [25, 27, 29, 27])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([25, 27, 29, 27])))
sess.query(User).filter(User.age > 27).\
update(
{users.c.age_int: User.age - 10},
synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [25, 27, 19, 27])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([25, 27, 19, 27])))
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [15, 27, 19, 27])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([15, 27, 19, 27])))
def test_update_against_table_col(self):
User, users = self.classes.User, self.tables.users
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
eq_([john.age, jack.age, jill.age, jane.age], [25, 47, 29, 37])
sess.query(User).filter(User.age > 27).\
update(
{users.c.age_int: User.age - 10},
synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 19, 27])
def test_update_against_metadata(self):
User, users = self.classes.User, self.tables.users
sess = Session()
sess.query(users).update(
{users.c.age_int: 29}, synchronize_session=False)
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([29, 29, 29, 29])))
def test_update_with_bindparams(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(text('age_int > :x')).params(x=29).\
update({'age': User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([25, 37, 29, 27])))
def test_update_without_load(self):
User = self.classes.User
sess = Session()
sess.query(User).filter(User.id == 3).\
update({'age': 44}, synchronize_session='fetch')
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([25, 47, 44, 37])))
def test_update_changes_resets_dirty(self):
User = self.classes.User
sess = Session(autoflush=False)
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
john.age = 50
jack.age = 37
        # autoflush is False, therefore our '50' and '37' are getting
        # blown away by this operation.
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='evaluate')
for x in (john, jack, jill, jane):
assert not sess.is_modified(x)
eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
john.age = 25
assert john in sess.dirty
assert jack in sess.dirty
assert jill not in sess.dirty
assert not sess.is_modified(john)
assert not sess.is_modified(jack)
def test_update_changes_with_autoflush(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
john.age = 50
jack.age = 37
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='evaluate')
for x in (john, jack, jill, jane):
assert not sess.is_modified(x)
eq_([john.age, jack.age, jill.age, jane.age], [40, 27, 29, 27])
john.age = 25
assert john in sess.dirty
assert jack not in sess.dirty
assert jill not in sess.dirty
assert sess.is_modified(john)
assert not sess.is_modified(jack)
def test_update_with_expire_strategy(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([25, 37, 29, 27])))
@testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount)
def test_update_returns_rowcount(self):
User = self.classes.User
sess = Session()
rowcount = sess.query(User).filter(
User.age > 29).update({'age': User.age + 0})
eq_(rowcount, 2)
rowcount = sess.query(User).filter(
User.age > 29).update({'age': User.age - 10})
eq_(rowcount, 2)
@testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount)
def test_delete_returns_rowcount(self):
User = self.classes.User
sess = Session()
rowcount = sess.query(User).filter(User.age > 26).\
delete(synchronize_session=False)
eq_(rowcount, 3)
def test_update_all(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).update({'age': 42}, synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [42, 42, 42, 42])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([42, 42, 42, 42])))
def test_delete_all(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).delete(synchronize_session='evaluate')
assert not (
john in sess or jack in sess or jill in sess or jane in sess)
eq_(sess.query(User).count(), 0)
def test_autoflush_before_evaluate_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
update({'age': 42},
synchronize_session='evaluate')
eq_(john.age, 42)
def test_autoflush_before_fetch_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
update({'age': 42},
synchronize_session='fetch')
eq_(john.age, 42)
def test_autoflush_before_evaluate_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
delete(
synchronize_session='evaluate')
assert john not in sess
def test_autoflush_before_fetch_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
delete(
synchronize_session='fetch')
assert john not in sess
def test_evaluate_before_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
# the evaluation must happen before the UPDATE is emitted; otherwise
# john would be evaluated after his expired 'age' reloads as the new
# value, which no longer matches the criteria
sess.query(User).filter_by(name='john').filter_by(age=25).\
update({'name': 'j2', 'age': 40},
synchronize_session='evaluate')
eq_(john.name, 'j2')
eq_(john.age, 40)
def test_fetch_before_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
sess.query(User).filter_by(name='john').filter_by(age=25).\
update({'name': 'j2', 'age': 40},
synchronize_session='fetch')
eq_(john.name, 'j2')
eq_(john.age, 40)
def test_evaluate_before_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
sess.query(User).filter_by(name='john').\
filter_by(age=25).\
delete(
synchronize_session='evaluate')
assert john not in sess
def test_fetch_before_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
sess.query(User).filter_by(name='john').\
filter_by(age=25).\
delete(
synchronize_session='fetch')
assert john not in sess
class UpdateDeleteIgnoresLoadersTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(32)),
Column('age', Integer))
Table('documents', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('user_id', None, ForeignKey('users.id')),
Column('title', String(32)))
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Document(cls.Comparable):
pass
@classmethod
def insert_data(cls):
users = cls.tables.users
users.insert().execute([
dict(id=1, name='john', age=25),
dict(id=2, name='jack', age=47),
dict(id=3, name='jill', age=29),
dict(id=4, name='jane', age=37),
])
documents = cls.tables.documents
documents.insert().execute([
dict(id=1, user_id=1, title='foo'),
dict(id=2, user_id=1, title='bar'),
dict(id=3, user_id=2, title='baz'),
])
@classmethod
def setup_mappers(cls):
documents, Document, User, users = (cls.tables.documents,
cls.classes.Document,
cls.classes.User,
cls.tables.users)
mapper(User, users)
mapper(Document, documents, properties={
'user': relationship(User, lazy='joined',
backref=backref('documents', lazy='select'))
})
def test_update_with_eager_relationships(self):
Document = self.classes.Document
sess = Session()
foo, bar, baz = sess.query(Document).order_by(Document.id).all()
sess.query(Document).filter(Document.user_id == 1).\
update({'title': Document.title + Document.title},
synchronize_session='fetch')
eq_([foo.title, bar.title, baz.title], ['foofoo', 'barbar', 'baz'])
eq_(sess.query(Document.title).order_by(Document.id).all(),
list(zip(['foofoo', 'barbar', 'baz'])))
def test_update_with_explicit_joinedload(self):
User = self.classes.User
sess = Session()
john, jack, jill, jane = sess.query(User).order_by(User.id).all()
sess.query(User).options(
joinedload(User.documents)).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
eq_(sess.query(User.age).order_by(
User.id).all(), list(zip([25, 37, 29, 27])))
def test_delete_with_eager_relationships(self):
Document = self.classes.Document
sess = Session()
sess.query(Document).filter(Document.user_id == 1).\
delete(synchronize_session=False)
eq_(sess.query(Document.title).all(), list(zip(['baz'])))
class UpdateDeleteFromTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('samename', String(10)),
)
Table('documents', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('title', String(32)),
Column('flag', Boolean),
Column('samename', String(10)),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Document(cls.Comparable):
pass
@classmethod
def insert_data(cls):
users = cls.tables.users
users.insert().execute([
dict(id=1, ),
dict(id=2, ),
dict(id=3, ),
dict(id=4, ),
])
documents = cls.tables.documents
documents.insert().execute([
dict(id=1, user_id=1, title='foo'),
dict(id=2, user_id=1, title='bar'),
dict(id=3, user_id=2, title='baz'),
dict(id=4, user_id=2, title='hoho'),
dict(id=5, user_id=3, title='lala'),
dict(id=6, user_id=3, title='bleh'),
])
@classmethod
def setup_mappers(cls):
documents, Document, User, users = (cls.tables.documents,
cls.classes.Document,
cls.classes.User,
cls.tables.users)
mapper(User, users)
mapper(Document, documents, properties={
'user': relationship(User, backref='documents')
})
@testing.requires.update_from
def test_update_from_joined_subq_test(self):
Document = self.classes.Document
s = Session()
subq = s.query(func.max(Document.title).label('title')).\
group_by(Document.user_id).subquery()
s.query(Document).filter(Document.title == subq.c.title).\
update({'flag': True}, synchronize_session=False)
eq_(
set(s.query(Document.id, Document.flag)),
set([
(1, True), (2, None),
(3, None), (4, True),
(5, True), (6, None)])
)
def test_no_eval_against_multi_table_criteria(self):
User = self.classes.User
Document = self.classes.Document
s = Session()
q = s.query(User).filter(User.id == Document.user_id)
assert_raises_message(
exc.InvalidRequestError,
"Could not evaluate current criteria in Python.",
q.update,
{"name": "ed"}
)
@testing.requires.update_where_target_in_subquery
def test_update_using_in(self):
Document = self.classes.Document
s = Session()
subq = s.query(func.max(Document.title).label('title')).\
group_by(Document.user_id).subquery()
s.query(Document).filter(Document.title.in_(subq)).\
update({'flag': True}, synchronize_session=False)
eq_(
set(s.query(Document.id, Document.flag)),
set([
(1, True), (2, None),
(3, None), (4, True),
(5, True), (6, None)])
)
@testing.requires.update_where_target_in_subquery
@testing.requires.standalone_binds
def test_update_using_case(self):
Document = self.classes.Document
s = Session()
subq = s.query(func.max(Document.title).label('title')).\
group_by(Document.user_id).subquery()
# this would work with Firebird if you do literal_column('1')
# instead
case_stmt = case([(Document.title.in_(subq), True)], else_=False)
s.query(Document).update(
{'flag': case_stmt}, synchronize_session=False)
eq_(
set(s.query(Document.id, Document.flag)),
set([
(1, True), (2, False),
(3, False), (4, True),
(5, True), (6, False)])
)
@testing.only_on('mysql', 'Multi table update')
def test_update_from_multitable_same_names(self):
Document = self.classes.Document
User = self.classes.User
s = Session()
s.query(Document).\
filter(User.id == Document.user_id).\
filter(User.id == 2).update({
Document.samename: 'd_samename',
User.samename: 'u_samename'
}, synchronize_session=False)
eq_(
s.query(User.id, Document.samename, User.samename).
filter(User.id == Document.user_id).
order_by(User.id).all(),
[
(1, None, None),
(1, None, None),
(2, 'd_samename', 'u_samename'),
(2, 'd_samename', 'u_samename'),
(3, None, None),
(3, None, None),
]
)
class ExpressionUpdateTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
data = Table('data', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('counter', Integer, nullable=False, default=0)
)
@classmethod
def setup_classes(cls):
class Data(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
data = cls.tables.data
mapper(cls.classes.Data, data, properties={'cnt': data.c.counter})
@testing.provide_metadata
def test_update_attr_names(self):
Data = self.classes.Data
d1 = Data()
sess = Session()
sess.add(d1)
sess.commit()
eq_(d1.cnt, 0)
sess.query(Data).update({Data.cnt: Data.cnt + 1})
sess.flush()
eq_(d1.cnt, 1)
sess.query(Data).update({Data.cnt: Data.cnt + 1}, 'fetch')
sess.flush()
eq_(d1.cnt, 2)
sess.close()
def test_update_args(self):
Data = self.classes.Data
session = testing.mock.Mock(wraps=Session())
update_args = {"mysql_limit": 1}
query.Query(Data, session).update({Data.cnt: Data.cnt + 1},
update_args=update_args)
eq_(session.execute.call_count, 1)
args, kwargs = session.execute.call_args
eq_(len(args), 1)
update_stmt = args[0]
eq_(update_stmt.dialect_kwargs, update_args)
class InheritTest(fixtures.DeclarativeMappedTest):
run_inserts = 'each'
run_deletes = 'each'
__backend__ = True
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Person(Base):
__tablename__ = 'person'
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True)
type = Column(String(50))
name = Column(String(50))
class Engineer(Person):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('person.id'), primary_key=True)
engineer_name = Column(String(50))
class Manager(Person):
__tablename__ = 'manager'
id = Column(Integer, ForeignKey('person.id'), primary_key=True)
manager_name = Column(String(50))
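# Person/Engineer/Manager form a joined-table inheritance hierarchy,
# which the bulk UPDATE tests below run against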
@classmethod
def insert_data(cls):
Engineer, Person, Manager = cls.classes.Engineer, \
cls.classes.Person, cls.classes.Manager
s = Session(testing.db)
s.add_all([
Engineer(name='e1', engineer_name='e1'),
Manager(name='m1', manager_name='m1'),
Engineer(name='e2', engineer_name='e2'),
Person(name='p1'),
])
s.commit()
def test_illegal_metadata(self):
person = self.classes.Person.__table__
engineer = self.classes.Engineer.__table__
sess = Session()
assert_raises_message(
exc.InvalidRequestError,
"This operation requires only one Table or entity be "
"specified as the target.",
sess.query(person.join(engineer)).update, {}
)
def test_update_subtable_only(self):
Engineer = self.classes.Engineer
s = Session(testing.db)
s.query(Engineer).update({'engineer_name': 'e5'})
eq_(
s.query(Engineer.engineer_name).all(),
[('e5', ), ('e5', )]
)
@testing.requires.update_from
def test_update_from(self):
Engineer = self.classes.Engineer
Person = self.classes.Person
s = Session(testing.db)
s.query(Engineer).filter(Engineer.id == Person.id).\
filter(Person.name == 'e2').update({'engineer_name': 'e5'})
eq_(
set(s.query(Person.name, Engineer.engineer_name)),
set([('e1', 'e1', ), ('e2', 'e5')])
)
@testing.only_on('mysql', 'Multi table update')
def test_update_from_multitable(self):
Engineer = self.classes.Engineer
Person = self.classes.Person
s = Session(testing.db)
s.query(Engineer).filter(Engineer.id == Person.id).\
filter(Person.name == 'e2').update({Person.name: 'e22',
Engineer.engineer_name: 'e55'})
eq_(
set(s.query(Person.name, Engineer.engineer_name)),
set([('e1', 'e1', ), ('e22', 'e55')])
)
| mit |
maestrano/odoo | addons/decimal_precision/decimal_precision.py | 27 | 3965 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import orm, fields
from openerp.modules.registry import RegistryManager
class decimal_precision(orm.Model):
_name = 'decimal.precision'
_columns = {
'name': fields.char('Usage', select=True, required=True),
'digits': fields.integer('Digits', required=True),
}
_defaults = {
'digits': 2,
}
_sql_constraints = [
('name_uniq', 'unique (name)', """Only one value can be defined for each given usage!"""),
]
@tools.ormcache(skiparg=3)
def precision_get(self, cr, uid, application):
cr.execute('select digits from decimal_precision where name=%s', (application,))
res = cr.fetchone()
return res[0] if res else 2
def clear_cache(self, cr):
"""clear cache and update models. Notify other workers to restart their registry."""
self.precision_get.clear_cache(self)
env = openerp.api.Environment(cr, SUPERUSER_ID, {})
for model in self.pool.values():
for field in model._fields.values():
if field.type == 'float':
field._setup_digits(env)
RegistryManager.signal_caches_change(cr.dbname)
def create(self, cr, uid, data, context=None):
res = super(decimal_precision, self).create(cr, uid, data, context=context)
self.clear_cache(cr)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(decimal_precision, self).unlink(cr, uid, ids, context=context)
self.clear_cache(cr)
return res
def write(self, cr, uid, ids, data, *args, **argv):
res = super(decimal_precision, self).write(cr, uid, ids, data, *args, **argv)
self.clear_cache(cr)
return res
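# returns a callable so float fields can defer the precision lookup to
# runtime, typically as fields.float(digits_compute=dp.get_precision('Account'))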
def get_precision(application):
def change_digit(cr):
decimal_precision = openerp.registry(cr.dbname)['decimal.precision']
res = decimal_precision.precision_get(cr, SUPERUSER_ID, application)
return (16, res)
return change_digit
class DecimalPrecisionFloat(orm.AbstractModel):
""" Override qweb.field.float to add a `decimal_precision` domain option
and use that instead of the column's own value if it is specified
"""
_inherit = 'ir.qweb.field.float'
def precision(self, cr, uid, column, options=None, context=None):
dp = options and options.get('decimal_precision')
if dp:
return self.pool['decimal.precision'].precision_get(
cr, uid, dp)
return super(DecimalPrecisionFloat, self).precision(
cr, uid, column, options=options, context=context)
class DecimalPrecisionTestModel(orm.Model):
_name = 'decimal.precision.test'
_columns = {
'float': fields.float(),
'float_2': fields.float(digits=(16, 2)),
'float_4': fields.float(digits=(16, 4)),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ewandor/home-assistant | tests/util/test_distance.py | 42 | 3525 | """Test homeassistant distance utility functions."""
import unittest
import homeassistant.util.distance as distance_util
from homeassistant.const import (LENGTH_KILOMETERS, LENGTH_METERS, LENGTH_FEET,
LENGTH_MILES)
INVALID_SYMBOL = 'bob'
VALID_SYMBOL = LENGTH_KILOMETERS
class TestDistanceUtil(unittest.TestCase):
"""Test the distance utility functions."""
def test_convert_same_unit(self):
"""Test conversion from any unit to same unit."""
self.assertEqual(5,
distance_util.convert(5, LENGTH_KILOMETERS,
LENGTH_KILOMETERS))
self.assertEqual(2,
distance_util.convert(2, LENGTH_METERS,
LENGTH_METERS))
self.assertEqual(10,
distance_util.convert(10, LENGTH_MILES, LENGTH_MILES))
self.assertEqual(9,
distance_util.convert(9, LENGTH_FEET, LENGTH_FEET))
def test_convert_invalid_unit(self):
"""Test exception is thrown for invalid units."""
with self.assertRaises(ValueError):
distance_util.convert(5, INVALID_SYMBOL,
VALID_SYMBOL)
with self.assertRaises(ValueError):
distance_util.convert(5, VALID_SYMBOL,
INVALID_SYMBOL)
def test_convert_nonnumeric_value(self):
"""Test exception is thrown for nonnumeric type."""
with self.assertRaises(TypeError):
distance_util.convert('a', LENGTH_KILOMETERS, LENGTH_METERS)
def test_convert_from_miles(self):
"""Test conversion from miles to other units."""
miles = 5
self.assertEqual(
distance_util.convert(miles, LENGTH_MILES, LENGTH_KILOMETERS),
8.04672)
self.assertEqual(
distance_util.convert(miles, LENGTH_MILES, LENGTH_METERS),
8046.72)
self.assertEqual(
distance_util.convert(miles, LENGTH_MILES, LENGTH_FEET),
26400.0008448)
def test_convert_from_feet(self):
"""Test conversion from feet to other units."""
feet = 5000
self.assertEqual(
distance_util.convert(feet, LENGTH_FEET, LENGTH_KILOMETERS),
1.524)
self.assertEqual(
distance_util.convert(feet, LENGTH_FEET, LENGTH_METERS),
1524)
self.assertEqual(
distance_util.convert(feet, LENGTH_FEET, LENGTH_MILES),
0.9469694040000001)
def test_convert_from_kilometers(self):
"""Test conversion from kilometers to other units."""
km = 5
self.assertEqual(
distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_FEET),
16404.2)
self.assertEqual(
distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_METERS),
5000)
self.assertEqual(
distance_util.convert(km, LENGTH_KILOMETERS, LENGTH_MILES),
3.106855)
def test_convert_from_meters(self):
"""Test conversion from meters to other units."""
m = 5000
self.assertEqual(distance_util.convert(m, LENGTH_METERS, LENGTH_FEET),
16404.2)
self.assertEqual(
distance_util.convert(m, LENGTH_METERS, LENGTH_KILOMETERS),
5)
self.assertEqual(distance_util.convert(m, LENGTH_METERS, LENGTH_MILES),
3.106855)
| apache-2.0 |
kevin-intel/scikit-learn | sklearn/preprocessing/tests/test_discretization.py | 3 | 12009 |
import pytest
import numpy as np
import scipy.sparse as sp
import warnings
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_array_equal,
assert_allclose_dense_sparse
)
X = [[-2, 1.5, -4, -1],
[-1, 2.5, -3, -0.5],
[0, 3.5, -2, 0.5],
[1, 4.5, -1, 2]]
@pytest.mark.parametrize(
'strategy, expected',
[('uniform', [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]]),
('kmeans', [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]),
('quantile', [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]])])
def test_fit_transform(strategy, expected):
est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy=strategy)
est.fit(X)
assert_array_equal(expected, est.transform(X))
def test_valid_n_bins():
KBinsDiscretizer(n_bins=2).fit_transform(X)
KBinsDiscretizer(n_bins=np.array([2])[0]).fit_transform(X)
assert KBinsDiscretizer(n_bins=2).fit(X).n_bins_.dtype == np.dtype(int)
def test_invalid_n_bins():
est = KBinsDiscretizer(n_bins=1)
err_msg = ("KBinsDiscretizer received an invalid "
"number of bins. Received 1, expected at least 2.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
est = KBinsDiscretizer(n_bins=1.1)
err_msg = ("KBinsDiscretizer received an invalid "
"n_bins type. Received float, expected int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
def test_invalid_n_bins_array():
# Bad shape
n_bins = np.full((2, 4), 2.)
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Incorrect number of features
n_bins = [1, 2, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Bad bin values
n_bins = [1, 2, 2, 1]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = ("KBinsDiscretizer received an invalid number of bins "
"at indices 0, 3. Number of bins must be at least 2, "
"and must be an int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Float bin values
n_bins = [2.1, 2, 2.1, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = ("KBinsDiscretizer received an invalid number of bins "
"at indices 0, 2. Number of bins must be at least 2, "
"and must be an int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
@pytest.mark.parametrize(
'strategy, expected',
[('uniform', [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]]),
('kmeans', [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]]),
('quantile', [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]])])
def test_fit_transform_n_bins_array(strategy, expected):
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode='ordinal',
strategy=strategy).fit(X)
assert_array_equal(expected, est.transform(X))
# test the shape of bin_edges_
n_features = np.array(X).shape[1]
assert est.bin_edges_.shape == (n_features, )
for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_):
assert bin_edges.shape == (n_bins + 1, )
@pytest.mark.parametrize('strategy', ['uniform', 'kmeans', 'quantile'])
def test_same_min_max(strategy):
warnings.simplefilter("always")
X = np.array([[1, -2],
[1, -1],
[1, 0],
[1, 1]])
est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode='ordinal')
warning_message = ("Feature 0 is constant and will be replaced "
"with 0.")
with pytest.warns(UserWarning, match=warning_message):
est.fit(X)
assert est.n_bins_[0] == 1
# replace the feature with zeros
Xt = est.transform(X)
assert_array_equal(Xt[:, 0], np.zeros(X.shape[0]))
def test_transform_1d_behavior():
X = np.arange(4)
est = KBinsDiscretizer(n_bins=2)
with pytest.raises(ValueError):
est.fit(X)
est = KBinsDiscretizer(n_bins=2)
est.fit(X.reshape(-1, 1))
with pytest.raises(ValueError):
est.transform(X)
@pytest.mark.parametrize('i', range(1, 9))
def test_numeric_stability(i):
X_init = np.array([2., 4., 6., 8., 10.]).reshape(-1, 1)
Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1)
# Test up to discretizing nano units
X = X_init / 10**i
Xt = KBinsDiscretizer(n_bins=2, encode='ordinal').fit_transform(X)
assert_array_equal(Xt_expected, Xt)
def test_invalid_encode_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode='invalid-encode')
err_msg = (r"Valid options for 'encode' are "
r"\('onehot', 'onehot-dense', 'ordinal'\). "
r"Got encode='invalid-encode' instead.")
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
def test_encode_options():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='ordinal').fit(X)
Xt_1 = est.transform(X)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='onehot-dense').fit(X)
Xt_2 = est.transform(X)
assert not sp.issparse(Xt_2)
assert_array_equal(OneHotEncoder(
categories=[np.arange(i) for i in [2, 3, 3, 3]],
sparse=False)
.fit_transform(Xt_1), Xt_2)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='onehot').fit(X)
Xt_3 = est.transform(X)
assert sp.issparse(Xt_3)
assert_array_equal(OneHotEncoder(
categories=[np.arange(i) for i in [2, 3, 3, 3]],
sparse=True)
.fit_transform(Xt_1).toarray(),
Xt_3.toarray())
def test_invalid_strategy_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], strategy='invalid-strategy')
err_msg = (r"Valid options for 'strategy' are "
r"\('uniform', 'quantile', 'kmeans'\). "
r"Got strategy='invalid-strategy' instead.")
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
@pytest.mark.parametrize(
'strategy, expected_2bins, expected_3bins, expected_5bins',
[('uniform', [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]),
('kmeans', [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]),
('quantile', [0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4])])
def test_nonuniform_strategies(
strategy, expected_2bins, expected_3bins, expected_5bins):
X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1)
# with 2 bins
est = KBinsDiscretizer(n_bins=2, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_2bins, Xt.ravel())
# with 3 bins
est = KBinsDiscretizer(n_bins=3, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_3bins, Xt.ravel())
# with 5 bins
est = KBinsDiscretizer(n_bins=5, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_5bins, Xt.ravel())
@pytest.mark.parametrize(
'strategy, expected_inv',
[('uniform', [[-1.5, 2., -3.5, -0.5], [-0.5, 3., -2.5, -0.5],
[0.5, 4., -1.5, 0.5], [0.5, 4., -1.5, 1.5]]),
('kmeans', [[-1.375, 2.125, -3.375, -0.5625],
[-1.375, 2.125, -3.375, -0.5625],
[-0.125, 3.375, -2.125, 0.5625],
[0.75, 4.25, -1.25, 1.625]]),
('quantile', [[-1.5, 2., -3.5, -0.75], [-0.5, 3., -2.5, 0.],
[0.5, 4., -1.5, 1.25], [0.5, 4., -1.5, 1.25]])])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_inverse_transform(strategy, encode, expected_inv):
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, encode=encode)
Xt = kbd.fit_transform(X)
Xinv = kbd.inverse_transform(Xt)
assert_array_almost_equal(expected_inv, Xinv)
@pytest.mark.parametrize('strategy', ['uniform', 'kmeans', 'quantile'])
def test_transform_outside_fit_range(strategy):
X = np.array([0, 1, 2, 3])[:, None]
kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode='ordinal')
kbd.fit(X)
X2 = np.array([-2, 5])[:, None]
X2t = kbd.transform(X2)
assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_)
assert_array_equal(X2t.min(axis=0), [0])
def test_overwrite():
X = np.array([0, 1, 2, 3])[:, None]
X_before = X.copy()
est = KBinsDiscretizer(n_bins=3, encode="ordinal")
Xt = est.fit_transform(X)
assert_array_equal(X, X_before)
Xt_before = Xt.copy()
Xinv = est.inverse_transform(Xt)
assert_array_equal(Xt, Xt_before)
assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]]))
@pytest.mark.parametrize(
'strategy, expected_bin_edges',
[('quantile', [0, 1, 3]), ('kmeans', [0, 1.5, 3])])
def test_redundant_bins(strategy, expected_bin_edges):
X = [[0], [0], [0], [0], [3], [3]]
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy)
warning_message = ("Consider decreasing the number of bins.")
with pytest.warns(UserWarning, match=warning_message):
kbd.fit(X)
assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges)
def test_percentile_numeric_stability():
X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1)
bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95])
Xt = np.array([0, 0, 4]).reshape(-1, 1)
kbd = KBinsDiscretizer(n_bins=10, encode='ordinal',
strategy='quantile')
warning_message = ("Consider decreasing the number of bins.")
with pytest.warns(UserWarning, match=warning_message):
kbd.fit(X)
assert_array_almost_equal(kbd.bin_edges_[0], bin_edges)
assert_array_almost_equal(kbd.transform(X), Xt)
@pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("out_dtype", [None, np.float16, np.float32,
np.float64])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_consistent_dtype(in_dtype, out_dtype, encode):
X_input = np.array(X, dtype=in_dtype)
kbd = KBinsDiscretizer(n_bins=3, encode=encode, dtype=out_dtype)
# an error is raised if a wrong dtype is defined for the model
if out_dtype not in [None, np.float32, np.float64]:
with pytest.raises(ValueError, match="Valid options for 'dtype' are"):
kbd.fit(X_input)
else:
kbd.fit(X_input)
# test output dtype
if out_dtype is not None:
expected_dtype = out_dtype
elif out_dtype is None and X_input.dtype == np.float16:
# unsupported numeric input dtypes (here np.float16) are cast to np.float64
expected_dtype = np.float64
else:
expected_dtype = X_input.dtype
Xt = kbd.transform(X_input)
assert Xt.dtype == expected_dtype
@pytest.mark.parametrize('input_dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_32_equal_64(input_dtype, encode):
# TODO this check is redundant with common checks and can be removed
# once #16290 is merged
X_input = np.array(X, dtype=input_dtype)
# 32 bit output
kbd_32 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float32)
kbd_32.fit(X_input)
Xt_32 = kbd_32.transform(X_input)
# 64 bit output
kbd_64 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float64)
kbd_64.fit(X_input)
Xt_64 = kbd_64.transform(X_input)
assert_allclose_dense_sparse(Xt_32, Xt_64)
| bsd-3-clause |
nrgaway/qubes-tools | builder-tools/libs/say-1.4.2/test/test_util.py | 1 | 2680 | """
Test separable utility functions used in say
"""
import six
import sys
from say.util import *
import pytest
def test_is_string():
assert is_string("")
assert is_string("This")
assert is_string(six.u("this"))
assert stringify(six.u("a\u2014b"))
assert not is_string(1)
assert not is_string(None)
assert not is_string([1, 2, 3])
assert not is_string(['a', 'b', 'c'])
def test_stringify():
assert stringify('this') == 'this'
assert stringify(4) == '4'
assert stringify(six.u("\u2014")) == six.u("\u2014")
@pytest.mark.xfail
def test_opened():
raise NotImplementedError('TBD')
def test_encoded():
tests = {
('this', None): 'this',
('this', 'utf-8'): six.b('this'),
(six.u('this'), 'utf-8'): six.b('this'),
(six.u('this\u2012and'), 'utf-8'): six.b('this\xe2\x80\x92and'),
(six.u('this\u2012and'), None): six.u('this\u2012and'),
}
for data, answer in tests.items():
(text, encoding) = data
assert encoded(text, encoding) == answer
@pytest.mark.skipif('sys.version_info[:2] > (3,2)')
def test_encoded2():
# This test currently fails on py33 and py34. It seems related to a routine
# that previously accepted strings now accepting only bytes on py33 and
# later. Only byte-oriented encodings such as base64 are affected, and it
# is not clear whether it makes sense to keep supporting them.
tests = {
(six.u('this-and'), 'base64'): 'dGhpcy1hbmQ=\n',
}
for data, answer in tests.items():
(text, encoding) = data
assert encoded(text, encoding) == answer
def test_flatten(*args):
tests = [
(1, [1]),
([1], [1]),
('one', ['one']),
(['one'], ['one']),
([2, 3, 4], [2, 3, 4])
]
for data, answer in tests:
assert [x for x in flatten(data)] == answer
def test_next_str():
def gen():
n = 1
while True:
yield str(n)
n += 1
g = gen()
for i in range(1, 5):
assert next_str(g) == str(i)
assert next_str(None) == ''
for gg in [1, 1.1, 'string', list, [1, 2, 3], {'a': 'A'}]:
assert next_str(gg) == str(gg)
@pytest.mark.skipif('True')
def test_csv_split():
# no quotes
assert csv_split('x,y,z') == ['x', 'y', 'z']
# double quotes
assert csv_split('x,y="simple",z') == ['x', 'y="simple"', 'z']
assert csv_split('x,y="internal comma,",z') == ['x', 'y="internal comma,"', 'z']
# single quotes
assert csv_split("x,y='simple',z") == ['x', "y='simple'", 'z']
assert csv_split("x,y='internal comma,',z") == ['x', "y='internal comma,'", 'z']
| gpl-2.0 |
GabrielBrascher/cloudstack | python/incubation/cloud-web-ipallocator.py | 4 | 4633 | #! /usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import web
import socket, struct
import cloud_utils
from cloud_utils import Command
urls = ("/ipallocator", "ipallocator")
app = web.application(urls, globals())
augtool = Command("augtool")
service = Command("service")
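# cloud_utils.Command wraps a CLI binary: calling it runs the command,
# .match()/.get() capture its output, and (judging from the augtool
# usage below) the '<' operator feeds a script to the command's stdin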
class dhcp:
_instance = None
def __init__(self):
self.availIP = []
self.router = None
self.netmask = None
self.initialized = False
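# parse dnsmasq.conf through augeas (augtool) to recover the router
# option and the dhcp-range, then seed the pool of available addresses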
options = augtool.match("/files/etc/dnsmasq.conf/dhcp-option").stdout.decode('utf-8').strip()
for option in options.splitlines():
if option.find("option:router") != -1:
self.router = option.split("=")[1].strip().split(",")[1]
print(self.router)
dhcp_range = augtool.get("/files/etc/dnsmasq.conf/dhcp-range").stdout.decode('utf-8').strip()
dhcp_start = dhcp_range.split("=")[1].strip().split(",")[0]
dhcp_end = dhcp_range.split("=")[1].strip().split(",")[1]
self.netmask = dhcp_range.split("=")[1].strip().split(",")[2]
print(dhcp_start, dhcp_end, self.netmask)
start_ip_num = self.ipToNum(dhcp_start)
end_ip_num = self.ipToNum(dhcp_end)
print(start_ip_num, end_ip_num)
for ip in range(start_ip_num, end_ip_num + 1):
self.availIP.append(ip)
print(self.availIP[0], self.availIP[len(self.availIP) - 1])
# load the IPs that are already allocated
self.reloadAllocatedIP()
def ipToNum(self, ip):
return struct.unpack("!I", socket.inet_aton(ip))[0]
def numToIp(self, num):
return socket.inet_ntoa(struct.pack('!I', num))
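# illustrative round-trip: ipToNum('10.1.1.5') == 167837957 and
# numToIp(167837957) == '10.1.1.5'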
def getFreeIP(self):
if len(self.availIP) > 0:
ip = self.numToIp(self.availIP[0])
self.availIP.remove(self.availIP[0])
return ip
else:
return None
def getNetmask(self):
return self.netmask
def getRouter(self):
return self.router
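# lazily-constructed singleton: every request handler shares one dhcp
# instance and, with it, the pool of available addresses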
def getInstance():
if not dhcp._instance:
dhcp._instance = dhcp()
return dhcp._instance
getInstance = staticmethod(getInstance)
def reloadAllocatedIP(self):
dhcp_hosts = augtool.match("/files/etc/dnsmasq.conf/dhcp-host").stdout.decode('utf-8').strip().splitlines()
for host in dhcp_hosts:
if host.find("dhcp-host") != -1:
allocatedIP = self.ipToNum(host.split("=")[1].strip().split(",")[1])
if allocatedIP in self.availIP:
self.availIP.remove(allocatedIP)
def allocateIP(self, mac):
newIP = self.getFreeIP()
dhcp_host = augtool.match("/files/etc/dnsmasq.conf/dhcp-host").stdout.decode('utf-8').strip()
cnt = len(dhcp_host.splitlines()) + 1
script = """set %s %s
save"""%("/files/etc/dnsmasq.conf/dhcp-host[" + str(cnt) + "]", str(mac) + "," + newIP)
augtool < script
#reset dnsmasq
service("dnsmasq", "restart", stdout=None, stderr=None)
return newIP
def releaseIP(self, ip):
dhcp_host = augtool.match("/files/etc/dnsmasq.conf/dhcp-host").stdout.decode('utf-8').strip()
path = None
for host in dhcp_host.splitlines():
if host.find(ip) != -1:
path = host.split("=")[0].strip()
if path == None:
print("Can't find " + str(ip) + " in conf file")
return None
print(path)
script = """rm %s
save"""%(path)
augtool < script
# return the address to the free pool; availIP stores numeric
# addresses, so convert the dotted string back first
self.availIP.append(self.ipToNum(ip))
#reset dnsmasq
service("dnsmasq", "restart", stdout=None, stderr=None)
class ipallocator:
def GET(self):
try:
user_data = web.input()
command = user_data.command
print("Processing: " + command)
dhcpInit = dhcp.getInstance()
if command == "getIpAddr":
mac = user_data.mac
zone_id = user_data.dc
pod_id = user_data.pod
print(mac, zone_id, pod_id)
freeIP = dhcpInit.allocateIP(mac)
if not freeIP:
return "0,0,0"
print("Find an available IP: " + freeIP)
return freeIP + "," + dhcpInit.getNetmask() + "," + dhcpInit.getRouter()
elif command == "releaseIpAddr":
ip = user_data.ip
zone_id = user_data.dc
pod_id = user_data.pod
dhcpInit.releaseIP(ip)
except:
return None
if __name__ == "__main__":
app.run()
| apache-2.0 |
Manishearth/servo | tests/wpt/harness/wptrunner/update/state.py | 196 | 4417 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import cPickle as pickle
here = os.path.abspath(os.path.split(__file__)[0])
class State(object):
filename = os.path.join(here, ".wpt-update.lock")
def __new__(cls, logger):
rv = cls.load(logger)
if rv is not None:
logger.debug("Existing state found")
return rv
logger.debug("No existing state found")
return object.__new__(cls)
def __init__(self, logger):
"""Object containing state variables created when running Steps.
On write the state is serialized to disk, such that it can be restored in
the event that the program is interrupted before all steps are complete.
Note that this only works well if the values are immutable; mutating an
existing value will not cause the data to be serialized.
Variables are set and get as attributes e.g. state_obj.spam = "eggs".
:param parent: Parent State object or None if this is the root object.
"""
if hasattr(self, "_data"):
return
self._data = [{}]
self._logger = logger
self._index = 0
def __getstate__(self):
rv = self.__dict__.copy()
del rv["_logger"]
return rv
@classmethod
def load(cls, logger):
"""Load saved state from a file"""
try:
with open(cls.filename) as f:
try:
rv = pickle.load(f)
logger.debug("Loading data %r" % (rv._data,))
rv._logger = logger
rv._index = 0
return rv
except EOFError:
logger.warning("Found empty state file")
except IOError:
logger.debug("IOError loading stored state")
def push(self, init_values):
"""Push a new clean state dictionary
:param init_values: List of variable names in the current state dict to copy
into the new state dict."""
return StateContext(self, init_values)
def save(self):
"""Write the state to disk"""
with open(self.filename, "w") as f:
pickle.dump(self, f)
def is_empty(self):
return len(self._data) == 1 and self._data[0] == {}
def clear(self):
"""Remove all state and delete the stored copy."""
try:
os.unlink(self.filename)
except OSError:
pass
self._data = [{}]
def __setattr__(self, key, value):
if key.startswith("_"):
object.__setattr__(self, key, value)
else:
self._data[self._index][key] = value
self.save()
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
try:
return self._data[self._index][key]
except KeyError:
raise AttributeError
def __contains__(self, key):
return key in self._data[self._index]
def update(self, items):
"""Add a dictionary of {name: value} pairs to the state"""
self._data[self._index].update(items)
self.save()
def keys(self):
return self._data[self._index].keys()
class StateContext(object):
def __init__(self, state, init_values):
self.state = state
self.init_values = init_values
def __enter__(self):
if len(self.state._data) == self.state._index + 1:
# This is the case where there is no stored state
new_state = {}
for key in self.init_values:
new_state[key] = self.state._data[self.state._index][key]
self.state._data.append(new_state)
self.state._index += 1
self.state._logger.debug("Incremented index to %s" % self.state._index)
def __exit__(self, *args, **kwargs):
if len(self.state._data) > 1:
assert self.state._index == len(self.state._data) - 1
self.state._data.pop()
self.state._index -= 1
self.state._logger.debug("Decremented index to %s" % self.state._index)
assert self.state._index >= 0
else:
raise ValueError("Tried to pop the top state")
| mpl-2.0 |
3dfxmadscientist/CBSS | addons/account_voucher/report/__init__.py | 60 | 1135 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_voucher
import account_voucher_print
import account_voucher_sales_receipt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MarkWh1te/xueqiu_predict | p3_env/lib/python3.5/site-packages/pygments/lexers/jvm.py | 21 | 66829 | # -*- coding: utf-8 -*-
"""
pygments.lexers.jvm
~~~~~~~~~~~~~~~~~~~
Pygments lexers for JVM languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, combined, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import shebang_matches
from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'ClojureScriptLexer',
'KotlinLexer', 'XtendLexer', 'AspectJLexer', 'CeylonLexer',
'PigLexer', 'GoloLexer', 'JasminLexer']
class JavaLexer(RegexLexer):
"""
For `Java <http://www.sun.com/java/>`_ source code.
"""
name = 'Java'
aliases = ['java']
filenames = ['*.java']
mimetypes = ['text/x-java']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
# keywords: go before method names to avoid lexing "throw new XYZ"
# as a method signature
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b',
Keyword),
# method names
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'@[^\W\d][\w.]*', Name.Decorator),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Operator, Name.Attribute)),
(r'^\s*([^\W\d]|\$)[\w$]*:', Name.Label),
(r'([^\W\d]|\$)[\w$]*', Name),
(r'([0-9](_*[0-9]+)*\.([0-9](_*[0-9]+)*)?|'
r'([0-9](_*[0-9]+)*)?\.[0-9](_*[0-9]+)*)'
r'([eE][+\-]?[0-9](_*[0-9]+)*)?[fFdD]?|'
r'[0-9][eE][+\-]?[0-9](_*[0-9]+)*[fFdD]?|'
r'[0-9]([eE][+\-]?[0-9](_*[0-9]+)*)?[fFdD]|'
r'0[xX]([0-9a-fA-F](_*[0-9a-fA-F]+)*\.?|'
r'([0-9a-fA-F](_*[0-9a-fA-F]+)*)?\.[0-9a-fA-F](_*[0-9a-fA-F]+)*)'
r'[pP][+\-]?[0-9](_*[0-9]+)*[fFdD]?', Number.Float),
(r'0[xX][0-9a-fA-F](_*[0-9a-fA-F]+)*[lL]?', Number.Hex),
(r'0[bB][01](_*[01]+)*[lL]?', Number.Bin),
(r'0(_*[0-7]+)+[lL]?', Number.Oct),
(r'0|[1-9](_*[0-9]+)*[lL]?', Number.Integer),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'\n', Text)
],
'class': [
(r'([^\W\d]|\$)[\w$]*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
class AspectJLexer(JavaLexer):
"""
For `AspectJ <http://www.eclipse.org/aspectj/>`_ source code.
.. versionadded:: 1.6
"""
name = 'AspectJ'
aliases = ['aspectj']
filenames = ['*.aj']
mimetypes = ['text/x-aspectj']
aj_keywords = set((
'aspect', 'pointcut', 'privileged', 'call', 'execution',
'initialization', 'preinitialization', 'handler', 'get', 'set',
'staticinitialization', 'target', 'args', 'within', 'withincode',
'cflow', 'cflowbelow', 'annotation', 'before', 'after', 'around',
'proceed', 'throwing', 'returning', 'adviceexecution', 'declare',
'parents', 'warning', 'error', 'soft', 'precedence', 'thisJoinPoint',
'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart',
'issingleton', 'perthis', 'pertarget', 'percflow', 'percflowbelow',
'pertypewithin', 'lock', 'unlock', 'thisAspectInstance'
))
aj_inter_type = set(('parents:', 'warning:', 'error:', 'soft:', 'precedence:'))
aj_inter_type_annotation = set(('@type', '@method', '@constructor', '@field'))
def get_tokens_unprocessed(self, text):
for index, token, value in JavaLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.aj_keywords:
yield index, Keyword, value
elif token is Name.Label and value in self.aj_inter_type:
yield index, Keyword, value[:-1]
yield index, Operator, value[-1]
elif token is Name.Decorator and value in self.aj_inter_type_annotation:
yield index, Keyword, value
else:
yield index, token, value
class ScalaLexer(RegexLexer):
"""
For `Scala <http://www.scala-lang.org>`_ source code.
"""
name = 'Scala'
aliases = ['scala']
filenames = ['*.scala']
mimetypes = ['text/x-scala']
flags = re.MULTILINE | re.DOTALL
# don't use raw unicode strings!
op = (u'[-~\\^\\*!%&\\\\<>\\|+=:/?@\u00a6-\u00a7\u00a9\u00ac\u00ae\u00b0-\u00b1'
u'\u00b6\u00d7\u00f7\u03f6\u0482\u0606-\u0608\u060e-\u060f\u06e9'
u'\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0cf1-\u0cf2'
u'\u0d79\u0f01-\u0f03\u0f13-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38'
u'\u0fbe-\u0fc5\u0fc7-\u0fcf\u109e-\u109f\u1360\u1390-\u1399\u1940'
u'\u19e0-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2044\u2052\u207a-\u207c'
u'\u208a-\u208c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2118'
u'\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u2140-\u2144'
u'\u214a-\u214d\u214f\u2190-\u2328\u232b-\u244a\u249c-\u24e9\u2500-\u2767'
u'\u2794-\u27c4\u27c7-\u27e5\u27f0-\u2982\u2999-\u29d7\u29dc-\u29fb'
u'\u29fe-\u2b54\u2ce5-\u2cea\u2e80-\u2ffb\u3004\u3012-\u3013\u3020'
u'\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3'
u'\u3200-\u321e\u322a-\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u33ff'
u'\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ufb29\ufdfd\ufe62\ufe64-\ufe66'
u'\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe4\uffe8-\uffee\ufffc-\ufffd]+')
letter = (u'[a-zA-Z\\$_\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6'
u'\u00f8-\u02af\u0370-\u0373\u0376-\u0377\u037b-\u037d\u0386'
u'\u0388-\u03f5\u03f7-\u0481\u048a-\u0556\u0561-\u0587\u05d0-\u05f2'
u'\u0621-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5'
u'\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5'
u'\u07b1\u07ca-\u07ea\u0904-\u0939\u093d\u0950\u0958-\u0961'
u'\u0972-\u097f\u0985-\u09b9\u09bd\u09ce\u09dc-\u09e1\u09f0-\u09f1'
u'\u0a05-\u0a39\u0a59-\u0a5e\u0a72-\u0a74\u0a85-\u0ab9\u0abd'
u'\u0ad0-\u0ae1\u0b05-\u0b39\u0b3d\u0b5c-\u0b61\u0b71\u0b83-\u0bb9'
u'\u0bd0\u0c05-\u0c3d\u0c58-\u0c61\u0c85-\u0cb9\u0cbd\u0cde-\u0ce1'
u'\u0d05-\u0d3d\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0dc6\u0e01-\u0e30'
u'\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0eb0\u0eb2-\u0eb3\u0ebd-\u0ec4'
u'\u0edc-\u0f00\u0f40-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f'
u'\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070'
u'\u1075-\u1081\u108e\u10a0-\u10fa\u1100-\u135a\u1380-\u138f'
u'\u13a0-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea\u16ee-\u1711'
u'\u1720-\u1731\u1740-\u1751\u1760-\u1770\u1780-\u17b3\u17dc'
u'\u1820-\u1842\u1844-\u18a8\u18aa-\u191c\u1950-\u19a9\u19c1-\u19c7'
u'\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf'
u'\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1d00-\u1d2b\u1d62-\u1d77'
u'\u1d79-\u1d9a\u1e00-\u1fbc\u1fbe\u1fc2-\u1fcc\u1fd0-\u1fdb'
u'\u1fe0-\u1fec\u1ff2-\u1ffc\u2071\u207f\u2102\u2107\u210a-\u2113'
u'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139'
u'\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c7c'
u'\u2c80-\u2ce4\u2d00-\u2d65\u2d80-\u2dde\u3006-\u3007\u3021-\u3029'
u'\u3038-\u303a\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff-\u318e'
u'\u31a0-\u31b7\u31f0-\u31ff\u3400-\u4db5\u4e00-\ua014\ua016-\ua48c'
u'\ua500-\ua60b\ua610-\ua61f\ua62a-\ua66e\ua680-\ua697\ua722-\ua76f'
u'\ua771-\ua787\ua78b-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822'
u'\ua840-\ua873\ua882-\ua8b3\ua90a-\ua925\ua930-\ua946\uaa00-\uaa28'
u'\uaa40-\uaa42\uaa44-\uaa4b\uac00-\ud7a3\uf900-\ufb1d\ufb1f-\ufb28'
u'\ufb2a-\ufd3d\ufd50-\ufdfb\ufe70-\ufefc\uff21-\uff3a\uff41-\uff5a'
u'\uff66-\uff6f\uff71-\uff9d\uffa0-\uffdc]')
upper = (u'[A-Z\\$_\u00c0-\u00d6\u00d8-\u00de\u0100\u0102\u0104\u0106\u0108'
u'\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c'
u'\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130'
u'\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145'
u'\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a'
u'\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e'
u'\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182'
u'\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194'
u'\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7'
u'\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc'
u'\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9'
u'\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee'
u'\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204'
u'\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218'
u'\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c'
u'\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246'
u'\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u0386\u0388-\u038f'
u'\u0391-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0'
u'\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7'
u'\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a'
u'\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e'
u'\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a'
u'\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae'
u'\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1'
u'\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6'
u'\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea'
u'\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe'
u'\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512'
u'\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0531-\u0556'
u'\u10a0-\u10c5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e'
u'\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22'
u'\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36'
u'\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a'
u'\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e'
u'\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72'
u'\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86'
u'\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2'
u'\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6'
u'\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca'
u'\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede'
u'\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2'
u'\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d'
u'\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59-\u1f5f'
u'\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb'
u'\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112'
u'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133'
u'\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67'
u'\u2c69\u2c6b\u2c6d-\u2c6f\u2c72\u2c75\u2c80\u2c82\u2c84\u2c86'
u'\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a'
u'\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae'
u'\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2'
u'\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6'
u'\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\ua640\ua642\ua644\ua646'
u'\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a'
u'\ua65c\ua65e\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682'
u'\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696'
u'\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736'
u'\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a'
u'\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e'
u'\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b'
u'\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\uff21-\uff3a]')
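# a Scala identifier: a letter followed by letters/digits, optionally
# ending in an operator suffix that must be preceded by an underscore
# (e.g. a method named foo_+)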
idrest = u'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
letter_letter_digit = u'%s(?:%s|\d)*' % (letter, letter)
tokens = {
'root': [
# method names
(r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(u'@%s' % idrest, Name.Decorator),
(u'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
u'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
u'lazy|match|new|override|pr(?:ivate|otected)'
u'|re(?:quires|turn)|s(?:ealed|uper)|'
u't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\\b|'
u'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword),
(u':(?!%s)' % op, Keyword, 'type'),
(u'%s%s\\b' % (upper, idrest), Name.Class),
(r'(true|false|null)\b', Keyword.Constant),
(r'(import|package)(\s+)', bygroups(Keyword, Text), 'import'),
(r'(type)(\s+)', bygroups(Keyword, Text), 'type'),
(r'""".*?"""(?!")', String),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(u"'%s" % idrest, Text.Symbol),
(r'[fs]"""', String, 'interptriplestring'), # interpolated strings
(r'[fs]"', String, 'interpstring'), # interpolated strings
(r'raw"(\\\\|\\"|[^"])*"', String), # raw strings
# (ur'(\.)(%s|%s|`[^`]+`)' % (idrest, op), bygroups(Operator,
# Name.Attribute)),
(idrest, Name),
(r'`[^`]+`', Name),
(r'\[', Operator, 'typeparam'),
(r'[(){};,.#]', Operator),
(op, Operator),
(r'([0-9][0-9]*\.[0-9]*|\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?',
Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(u'(%s|%s|`[^`]+`)(\\s*)(\\[)' % (idrest, op),
bygroups(Name.Class, Text, Operator), 'typeparam'),
(r'\s+', Text),
(r'\{', Operator, '#pop'),
(r'\(', Operator, '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(u'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
],
'type': [
(r'\s+', Text),
(r'<[%:]|>:|[#_]|forSome|type', Keyword),
(u'([,);}]|=>|=|\u21d2)(\\s*)', bygroups(Operator, Text), '#pop'),
(r'[({]', Operator, '#push'),
(u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)(\\[)' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text, Operator), ('#pop', 'typeparam')),
(u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)$' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text), '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'typeparam': [
(r'[\s,]+', Text),
(u'<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([\])}])', Operator, '#pop'),
(r'[(\[{]', Operator, '#push'),
(u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'import': [
(u'(%s|\\.)+' % idrest, Name.Namespace, '#pop')
],
'interpstringcommon': [
(r'[^"$\\]+', String),
(r'\$\$', String),
(r'\$' + letter_letter_digit, String.Interpol),
(r'\$\{', String.Interpol, 'interpbrace'),
(r'\\.', String),
],
'interptriplestring': [
(r'"""(?!")', String, '#pop'),
(r'"', String),
include('interpstringcommon'),
],
'interpstring': [
(r'"', String, '#pop'),
include('interpstringcommon'),
],
'interpbrace': [
(r'\}', String.Interpol, '#pop'),
(r'\{', String.Interpol, '#push'),
include('root'),
],
}
class GosuLexer(RegexLexer):
"""
For Gosu source code.
.. versionadded:: 1.5
"""
name = 'Gosu'
aliases = ['gosu']
filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark']
mimetypes = ['text/x-gosu']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # modifiers etc.
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|'
r'index|while|do|continue|break|return|try|catch|finally|this|'
r'throw|new|switch|case|default|eval|super|outer|classpath|'
r'using)\b', Keyword),
(r'(var|delegate|construct|function|private|internal|protected|'
r'public|abstract|override|final|static|extends|transient|'
r'implements|represents|readonly)\b', Keyword.Declaration),
(r'(property\s+)(get|set)?', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void|block)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null|NaN|Infinity)\b', Keyword.Constant),
(r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Text, Name.Class)),
(r'(uses)(\s+)([\w.]+\*?)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'"', String, 'string'),
(r'(\??[.#])([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'(:)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_$]\w*', Name),
(r'and|or|not|[\\~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\n', Text)
],
'templateText': [
(r'(\\<)|(\\\$)', String),
(r'(<%@\s+)(extends|params)',
bygroups(Operator, Name.Decorator), 'stringTemplate'),
(r'<%!--.*?--%>', Comment.Multiline),
(r'(<%)|(<%=)', Operator, 'stringTemplate'),
(r'\$\{', Operator, 'stringTemplateShorthand'),
(r'.', String)
],
'string': [
(r'"', String, '#pop'),
include('templateText')
],
'stringTemplate': [
(r'"', String, 'string'),
(r'%>', Operator, '#pop'),
include('root')
],
'stringTemplateShorthand': [
(r'"', String, 'string'),
(r'\{', Operator, 'stringTemplateShorthand'),
(r'\}', Operator, '#pop'),
include('root')
],
}
class GosuTemplateLexer(Lexer):
"""
For Gosu templates.
.. versionadded:: 1.5
"""
name = 'Gosu Template'
aliases = ['gst']
filenames = ['*.gst']
mimetypes = ['text/x-gosu-template']
def get_tokens_unprocessed(self, text):
lexer = GosuLexer()
stack = ['templateText']
for item in lexer.get_tokens_unprocessed(text, stack):
yield item
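# Illustrative use of the delegating lexer above (assumes pygments is
# importable; the sample template string is made up):
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('<% for (x in xs) { %>${x}<% } %>',
#                   GosuTemplateLexer(), TerminalFormatter()))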
class GroovyLexer(RegexLexer):
"""
For `Groovy <http://groovy.codehaus.org/>`_ source code.
.. versionadded:: 1.5
"""
name = 'Groovy'
aliases = ['groovy']
filenames = ['*.groovy','*.gradle']
mimetypes = ['text/x-groovy']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# Groovy allows a file to start with a shebang
(r'#!(.*?)$', Comment.Preproc, 'base'),
default('base'),
],
'base': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|in|as)\b',
Keyword),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(def|boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'""".*?"""', String.Double),
(r"'''.*?'''", String.Single),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'\$/((?!/\$).)*/\$', String),
(r'/(\\\\|\\"|[^/])*/', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
def analyse_text(text):
return shebang_matches(text, r'groovy')
class IokeLexer(RegexLexer):
"""
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype based programming language) source.
.. versionadded:: 1.4
"""
name = 'Ioke'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#\{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'\}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
# Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
# Symbols
(r':[\w!:?]+', String.Symbol),
(r'[\w!:?]+:(?![\w!?])', String.Other),
(r':"(\\\\|\\"|[^"])*"', String.Symbol),
# Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
r'|(?<=dsyntax\())\s*"', String.Doc, 'documentation'),
# Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
# Mimic
(r'\w[\w!:?]+(?=\s*=.*mimic\s)', Name.Entity),
# Assignment
(r'[a-zA-Z_][\w!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))',
Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
r'with)(?![\w!:?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![\w!:?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
r'(?![\w!:?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![\w!:?])', Keyword),
# DefaultBehaviour Literals
(r'(dict|list|message|set)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
r'case:otherwise|case:xor)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
r'(?![\w!:?])', Keyword),
# DefaultBehaviour Aspects
(r'(after|around|before)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
r'(?![\w!:?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
# DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
r'documentation|identity|removeCell!|undefineCell)'
r'(?![\w!:?])', Keyword),
# DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|'
r'internal:createDecimal|internal:createNumber|'
r'internal:createRegexp|internal:createText)'
r'(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|'
r'invokeRestart|rescue|restart|signal\!|warn\!)'
r'(?![\w!:?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![\w!:?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
r'Conditions|Definitions|FlowControl|Internal|Literals|'
r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
r'System|Text|Tuple)(?![\w!:?])', Name.Builtin),
# functions
            (u'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
             u'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro)'
             u'(?![\\w!:?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
u'\\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![\w!?])',
Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|\{|\})', Punctuation),
# kinds
(r'[A-Z][\w!:?]*', Name.Class),
# default cellnames
(r'[a-z_][\w!:?]*', Name)
]
}
class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
.. versionadded:: 0.11
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
filenames = ['*.clj']
mimetypes = ['text/x-clojure', 'application/x-clojure']
special_forms = (
'.', 'def', 'do', 'fn', 'if', 'let', 'new', 'quote', 'var', 'loop'
)
# It's safe to consider 'ns' a declaration thing because it defines a new
# namespace.
declarations = (
'def-', 'defn', 'defn-', 'defmacro', 'defmulti', 'defmethod',
'defstruct', 'defonce', 'declare', 'definline', 'definterface',
'defprotocol', 'defrecord', 'deftype', 'defproject', 'ns'
)
builtins = (
'*', '+', '-', '->', '/', '<', '<=', '=', '==', '>', '>=', '..',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly', 'cond', 'if-not',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush', 'for',
'fnseq', 'frest', 'gensym', 'get-proxy-class', 'get',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list*', 'list', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper')
# valid names for identifiers
# well, names can only not consist fully of numbers
# but this should be good enough for now
# TODO / should divide keywords/symbols into namespace/rest
# but that's hard, so just pretend / is part of the name
valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
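    # Illustrative matches for valid_name: 'map->vec', 'nil?', '+', and
    # 'clojure.core/str' (the '/' is kept as part of the name, per the TODO).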
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'[,\s]+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0x-?[abcdef\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\(.|[a-z]+)", String.Char),
# keywords
(r'::?#?' + valid_name, String.Symbol),
# special operators
(r'~@|[`\'#^~&@]', Operator),
# highlight the special forms
(words(special_forms, suffix=' '), Keyword),
# Technically, only the special forms are 'keywords'. The problem
# is that only treating them as keywords means that things like
# 'defn' and 'ns' need to be highlighted as builtins. This is ugly
# and weird for most styles. So, as a compromise we're going to
# highlight them as Keyword.Declarations.
(words(declarations, suffix=' '), Keyword.Declaration),
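            # e.g. in '(defn greet ...)', 'defn ' (with its trailing space) is
            # emitted as Keyword.Declaration, while 'greet' falls through to
            # the Name.Variable rule below (illustrative, not exhaustive).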
# highlight the builtins
(words(builtins, suffix=' '), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
class ClojureScriptLexer(ClojureLexer):
"""
Lexer for `ClojureScript <http://clojure.org/clojurescript>`_
source code.
.. versionadded:: 2.0
"""
name = 'ClojureScript'
aliases = ['clojurescript', 'cljs']
filenames = ['*.cljs']
mimetypes = ['text/x-clojurescript', 'application/x-clojurescript']
class TeaLangLexer(RegexLexer):
"""
For `Tea <http://teatrove.org/>`_ source code. Only used within a
TeaTemplateLexer.
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w\.]*', Name.Decorator),
(r'(and|break|else|foreach|if|in|not|or|reverse)\b',
Keyword),
(r'(as|call|define)\b', Keyword.Declaration),
(r'(true|false|null)\b', Keyword.Constant),
(r'(template)(\s+)', bygroups(Keyword.Declaration, Text), 'template'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\'(\\\\|\\\'|[^\'])*\'', String),
(r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_\$]\w*', Name),
(r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'template': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
class CeylonLexer(RegexLexer):
"""
For `Ceylon <http://ceylon-lang.org/>`_ source code.
.. versionadded:: 1.6
"""
name = 'Ceylon'
aliases = ['ceylon']
filenames = ['*.ceylon']
mimetypes = ['text/x-ceylon']
flags = re.MULTILINE | re.DOTALL
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'(shared|abstract|formal|default|actual|variable|deprecated|small|'
r'late|literal|doc|by|see|throws|optional|license|tagged|final|native|'
r'annotation|sealed)\b', Name.Decorator),
(r'(break|case|catch|continue|else|finally|for|in|'
r'if|return|switch|this|throw|try|while|is|exists|dynamic|'
r'nonempty|then|outer|assert|let)\b', Keyword),
(r'(abstracts|extends|satisfies|'
r'super|given|of|out|assign)\b', Keyword.Declaration),
(r'(function|value|void|new)\b',
Keyword.Type),
(r'(assembly|module|package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface|object|alias)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
(r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_]\w*', Name),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'\d{1,3}(_\d{3})+\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'\d{1,3}(_\d{3})+\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'[0-9][0-9]*\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'[0-9][0-9]*\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'#([0-9a-fA-F]{4})(_[0-9a-fA-F]{4})+', Number.Hex),
(r'#[0-9a-fA-F]+', Number.Hex),
(r'\$([01]{4})(_[01]{4})+', Number.Bin),
(r'\$[01]+', Number.Bin),
(r'\d{1,3}(_\d{3})+[kMGTP]?', Number.Integer),
(r'[0-9]+[kMGTP]?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[A-Za-z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[a-z][\w.]*',
Name.Namespace, '#pop')
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
class KotlinLexer(RegexLexer):
"""
For `Kotlin <http://kotlinlang.org/>`_
source code.
.. versionadded:: 1.5
"""
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt']
mimetypes = ['text/x-kotlin']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
kt_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
'Mn', 'Mc') + ']*')
kt_id = '(' + kt_name + '|`' + kt_name + '`)'
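    # kt_id accepts a plain identifier, an '@'-prefixed one, or a backquoted
    # identifier such as `fun` (backquotes let keywords serve as names);
    # an illustrative reading of the pattern above.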
tokens = {
'root': [
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'::|!!|\?[:.]', Operator),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'(class)(\s+)(object)', bygroups(Keyword, Text, Keyword)),
(r'(class|interface|object)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(package|import)(\s+)', bygroups(Keyword, Text), 'package'),
(r'(val|var)(\s+)', bygroups(Keyword, Text), 'property'),
(r'(fun)(\s+)', bygroups(Keyword, Text), 'function'),
(r'(abstract|annotation|as|break|by|catch|class|companion|const|'
r'constructor|continue|crossinline|data|do|dynamic|else|enum|'
r'external|false|final|finally|for|fun|get|if|import|in|infix|'
r'inline|inner|interface|internal|is|lateinit|noinline|null|'
r'object|open|operator|out|override|package|private|protected|'
r'public|reified|return|sealed|set|super|tailrec|this|throw|'
r'true|try|val|var|vararg|when|where|while)\b', Keyword),
(kt_id, Name),
],
'package': [
(r'\S+', Name.Namespace, '#pop')
],
'class': [
(kt_id, Name.Class, '#pop')
],
'property': [
(kt_id, Name.Property, '#pop')
],
'function': [
(kt_id, Name.Function, '#pop')
],
}
class XtendLexer(RegexLexer):
"""
For `Xtend <http://xtend-lang.org/>`_ source code.
.. versionadded:: 1.6
"""
name = 'Xtend'
aliases = ['xtend']
filenames = ['*.xtend']
mimetypes = ['text/x-xtend']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_$][\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|IF|'
r'ELSE|ELSEIF|ENDIF|FOR|ENDFOR|SEPARATOR|BEFORE|AFTER)\b',
Keyword),
(r'(def|abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r"(''')", String, 'template'),
(u'(\u00BB)', String, 'template'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
'template': [
(r"'''", String, '#pop'),
(u'\u00AB', String, '#pop'),
(r'.', String)
],
}
class PigLexer(RegexLexer):
"""
For `Pig Latin <https://pig.apache.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Pig'
aliases = ['pig']
filenames = ['*.pig']
mimetypes = ['text/x-pig']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'\\\n', Text),
(r'\\', Text),
(r'\'(?:\\[ntbrf\\\']|\\u[0-9a-f]{4}|[^\'\\\n\r])*\'', String),
include('keywords'),
include('types'),
include('builtins'),
include('punct'),
include('operators'),
(r'[0-9]*\.[0-9]+(e[0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text),
(r'([a-z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[()#:]', Text),
(r'[^(:#\'")\s]+', Text),
(r'\S+\s+', Text) # TODO: make tests pass without \s+
],
'keywords': [
(r'(assert|and|any|all|arrange|as|asc|bag|by|cache|CASE|cat|cd|cp|'
r'%declare|%default|define|dense|desc|describe|distinct|du|dump|'
             r'eval|exec|explain|filter|flatten|foreach|full|generate|group|'
r'help|if|illustrate|import|inner|input|into|is|join|kill|left|'
r'limit|load|ls|map|matches|mkdir|mv|not|null|onschema|or|order|'
r'outer|output|parallel|pig|pwd|quit|register|returns|right|rm|'
r'rmf|rollup|run|sample|set|ship|split|stderr|stdin|stdout|store|'
r'stream|through|union|using|void)\b', Keyword)
],
'builtins': [
(r'(AVG|BinStorage|cogroup|CONCAT|copyFromLocal|copyToLocal|COUNT|'
r'cross|DIFF|MAX|MIN|PigDump|PigStorage|SIZE|SUM|TextLoader|'
r'TOKENIZE)\b', Name.Builtin)
],
'types': [
(r'(bytearray|BIGINTEGER|BIGDECIMAL|chararray|datetime|double|float|'
r'int|long|tuple)\b', Keyword.Type)
],
'punct': [
(r'[;(){}\[\]]', Punctuation),
],
'operators': [
(r'[#=,./%+\-?]', Operator),
(r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
(r'(==|<=|<|>=|>|!=)', Operator),
],
}
class GoloLexer(RegexLexer):
"""
For `Golo <http://golo-lang.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Golo'
filenames = ['*.golo']
aliases = ['golo']
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
            (r'(\^|\.\.\.|:|\?:|->|==|!=|=|\+|\*|%|/|<=|<|>=|>|\.)',
Operator),
(r'(?<=[^-])(-)(?=[^-])', Operator),
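            # a lone '-' that is not part of a '--' run (descriptive note)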
(r'(?<=[^`])(is|isnt|and|or|not|oftype|in|orIfNull)\b', Operator.Word),
(r'[]{}|(),[]', Punctuation),
(r'(module|import)(\s+)',
bygroups(Keyword.Namespace, Text),
'modname'),
(r'\b([a-zA-Z_][\w$.]*)(::)', bygroups(Name.Namespace, Punctuation)),
(r'\b([a-zA-Z_][\w$]*(?:\.[a-zA-Z_][\w$]*)+)\b', Name.Namespace),
(r'(let|var)(\s+)',
bygroups(Keyword.Declaration, Text),
'varname'),
(r'(struct)(\s+)',
bygroups(Keyword.Declaration, Text),
'structname'),
(r'(function)(\s+)',
bygroups(Keyword.Declaration, Text),
'funcname'),
(r'(null|true|false)\b', Keyword.Constant),
(r'(augment|pimp'
r'|if|else|case|match|return'
r'|case|when|then|otherwise'
r'|while|for|foreach'
r'|try|catch|finally|throw'
r'|local'
r'|continue|break)\b', Keyword),
(r'(map|array|list|set|vector|tuple)(\[)',
bygroups(Name.Builtin, Punctuation)),
(r'(print|println|readln|raise|fun'
r'|asInterfaceInstance)\b', Name.Builtin),
(r'(`?[a-zA-Z_][\w$]*)(\()',
bygroups(Name.Function, Punctuation)),
(r'-?[\d_]*\.[\d_]*([eE][+-]?\d[\d_]*)?F?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'-?\d[\d_]*L', Number.Integer.Long),
(r'-?\d[\d_]*', Number.Integer),
            (r'`?[a-zA-Z_][\w$]*', Name),
(r'@[a-zA-Z_][\w$.]*', Name.Decorator),
(r'"""', String, combined('stringescape', 'triplestring')),
(r'"', String, combined('stringescape', 'doublestring')),
(r"'", String, combined('stringescape', 'singlestring')),
(r'----((.|\n)*?)----', String.Doc)
],
'funcname': [
(r'`?[a-zA-Z_][\w$]*', Name.Function, '#pop'),
],
'modname': [
(r'[a-zA-Z_][\w$.]*\*?', Name.Namespace, '#pop')
],
'structname': [
(r'`?[\w.]+\*?', Name.Class, '#pop')
],
'varname': [
(r'`?[a-zA-Z_][\w$]*', Name.Variable, '#pop'),
],
'string': [
(r'[^\\\'"\n]+', String),
(r'[\'"\\]', String)
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'triplestring': [
(r'"""', String, '#pop'),
include('string'),
(r'\n', String),
],
'doublestring': [
(r'"', String.Double, '#pop'),
include('string'),
],
'singlestring': [
(r"'", String, '#pop'),
include('string'),
],
'operators': [
(r'[#=,./%+\-?]', Operator),
(r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
(r'(==|<=|<|>=|>|!=)', Operator),
],
}
class JasminLexer(RegexLexer):
"""
For `Jasmin <http://jasmin.sourceforge.net/>`_ assembly code.
.. versionadded:: 2.0
"""
name = 'Jasmin'
aliases = ['jasmin', 'jasminxt']
filenames = ['*.j']
_whitespace = r' \n\t\r'
_ws = r'(?:[%s]+)' % _whitespace
_separator = r'%s:=' % _whitespace
_break = r'(?=[%s]|$)' % _separator
_name = r'[^%s]+' % _separator
_unqualified_name = r'(?:[^%s.;\[/]+)' % _separator
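    # _break is a zero-width lookahead asserting that a token ends at
    # whitespace, ':', '=' or end of input, so e.g. 'ldc' matches as an
    # instruction without consuming a following separator (illustrative).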
tokens = {
'default': [
(r'\n', Text, '#pop'),
(r"'", String.Single, ('#pop', 'quote')),
(r'"', String.Double, 'string'),
(r'=', Punctuation),
(r':', Punctuation, 'label'),
(_ws, Text),
(r';.*', Comment.Single),
(r'(\$[-+])?0x-?[\da-fA-F]+%s' % _break, Number.Hex),
(r'(\$[-+]|\+)?-?\d+%s' % _break, Number.Integer),
(r'-?(\d+\.\d*|\.\d+)([eE][-+]?\d+)?[fFdD]?'
r'[\x00-\x08\x0b\x0c\x0e-\x1f]*%s' % _break, Number.Float),
(r'\$%s' % _name, Name.Variable),
# Directives
(r'\.annotation%s' % _break, Keyword.Reserved, 'annotation'),
(r'(\.attribute|\.bytecode|\.debug|\.deprecated|\.enclosing|'
r'\.interface|\.line|\.signature|\.source|\.stack|\.var|abstract|'
r'annotation|bridge|class|default|enum|field|final|fpstrict|'
r'interface|native|private|protected|public|signature|static|'
r'synchronized|synthetic|transient|varargs|volatile)%s' % _break,
Keyword.Reserved),
(r'\.catch%s' % _break, Keyword.Reserved, 'caught-exception'),
(r'(\.class|\.implements|\.inner|\.super|inner|invisible|'
r'invisibleparam|outer|visible|visibleparam)%s' % _break,
Keyword.Reserved, 'class/convert-dots'),
(r'\.field%s' % _break, Keyword.Reserved,
('descriptor/convert-dots', 'field')),
(r'(\.end|\.limit|use)%s' % _break, Keyword.Reserved,
'no-verification'),
(r'\.method%s' % _break, Keyword.Reserved, 'method'),
(r'\.set%s' % _break, Keyword.Reserved, 'var'),
(r'\.throws%s' % _break, Keyword.Reserved, 'exception'),
(r'(from|offset|to|using)%s' % _break, Keyword.Reserved, 'label'),
(r'is%s' % _break, Keyword.Reserved,
('descriptor/convert-dots', 'var')),
(r'(locals|stack)%s' % _break, Keyword.Reserved, 'verification'),
(r'method%s' % _break, Keyword.Reserved, 'enclosing-method'),
# Instructions
(words((
'aaload', 'aastore', 'aconst_null', 'aload', 'aload_0', 'aload_1', 'aload_2',
'aload_3', 'aload_w', 'areturn', 'arraylength', 'astore', 'astore_0', 'astore_1',
'astore_2', 'astore_3', 'astore_w', 'athrow', 'baload', 'bastore', 'bipush',
'breakpoint', 'caload', 'castore', 'd2f', 'd2i', 'd2l', 'dadd', 'daload', 'dastore',
'dcmpg', 'dcmpl', 'dconst_0', 'dconst_1', 'ddiv', 'dload', 'dload_0', 'dload_1',
'dload_2', 'dload_3', 'dload_w', 'dmul', 'dneg', 'drem', 'dreturn', 'dstore', 'dstore_0',
'dstore_1', 'dstore_2', 'dstore_3', 'dstore_w', 'dsub', 'dup', 'dup2', 'dup2_x1',
'dup2_x2', 'dup_x1', 'dup_x2', 'f2d', 'f2i', 'f2l', 'fadd', 'faload', 'fastore', 'fcmpg',
'fcmpl', 'fconst_0', 'fconst_1', 'fconst_2', 'fdiv', 'fload', 'fload_0', 'fload_1',
'fload_2', 'fload_3', 'fload_w', 'fmul', 'fneg', 'frem', 'freturn', 'fstore', 'fstore_0',
'fstore_1', 'fstore_2', 'fstore_3', 'fstore_w', 'fsub', 'i2b', 'i2c', 'i2d', 'i2f', 'i2l',
'i2s', 'iadd', 'iaload', 'iand', 'iastore', 'iconst_0', 'iconst_1', 'iconst_2',
'iconst_3', 'iconst_4', 'iconst_5', 'iconst_m1', 'idiv', 'iinc', 'iinc_w', 'iload',
'iload_0', 'iload_1', 'iload_2', 'iload_3', 'iload_w', 'imul', 'ineg', 'int2byte',
'int2char', 'int2short', 'ior', 'irem', 'ireturn', 'ishl', 'ishr', 'istore', 'istore_0',
'istore_1', 'istore_2', 'istore_3', 'istore_w', 'isub', 'iushr', 'ixor', 'l2d', 'l2f',
'l2i', 'ladd', 'laload', 'land', 'lastore', 'lcmp', 'lconst_0', 'lconst_1', 'ldc2_w',
'ldiv', 'lload', 'lload_0', 'lload_1', 'lload_2', 'lload_3', 'lload_w', 'lmul', 'lneg',
'lookupswitch', 'lor', 'lrem', 'lreturn', 'lshl', 'lshr', 'lstore', 'lstore_0',
'lstore_1', 'lstore_2', 'lstore_3', 'lstore_w', 'lsub', 'lushr', 'lxor',
'monitorenter', 'monitorexit', 'nop', 'pop', 'pop2', 'ret', 'ret_w', 'return', 'saload',
'sastore', 'sipush', 'swap'), suffix=_break), Keyword.Reserved),
(r'(anewarray|checkcast|instanceof|ldc|ldc_w|new)%s' % _break,
Keyword.Reserved, 'class/no-dots'),
(r'invoke(dynamic|interface|nonvirtual|special|'
r'static|virtual)%s' % _break, Keyword.Reserved,
'invocation'),
(r'(getfield|putfield)%s' % _break, Keyword.Reserved,
('descriptor/no-dots', 'field')),
(r'(getstatic|putstatic)%s' % _break, Keyword.Reserved,
('descriptor/no-dots', 'static')),
(words((
'goto', 'goto_w', 'if_acmpeq', 'if_acmpne', 'if_icmpeq',
'if_icmpge', 'if_icmpgt', 'if_icmple', 'if_icmplt', 'if_icmpne',
'ifeq', 'ifge', 'ifgt', 'ifle', 'iflt', 'ifne', 'ifnonnull',
'ifnull', 'jsr', 'jsr_w'), suffix=_break),
Keyword.Reserved, 'label'),
(r'(multianewarray|newarray)%s' % _break, Keyword.Reserved,
'descriptor/convert-dots'),
(r'tableswitch%s' % _break, Keyword.Reserved, 'table')
],
'quote': [
(r"'", String.Single, '#pop'),
(r'\\u[\da-fA-F]{4}', String.Escape),
(r"[^'\\]+", String.Single)
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\([nrtfb"\'\\]|u[\da-fA-F]{4}|[0-3]?[0-7]{1,2})',
String.Escape),
(r'[^"\\]+', String.Double)
],
'root': [
(r'\n+', Text),
(r"'", String.Single, 'quote'),
include('default'),
(r'(%s)([ \t\r]*)(:)' % _name,
bygroups(Name.Label, Text, Punctuation)),
(_name, String.Other)
],
'annotation': [
(r'\n', Text, ('#pop', 'annotation-body')),
(r'default%s' % _break, Keyword.Reserved,
('#pop', 'annotation-default')),
include('default')
],
'annotation-body': [
(r'\n+', Text),
(r'\.end%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
(_name, String.Other, ('annotation-items', 'descriptor/no-dots'))
],
'annotation-default': [
(r'\n+', Text),
(r'\.end%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
default(('annotation-items', 'descriptor/no-dots'))
],
'annotation-items': [
(r"'", String.Single, 'quote'),
include('default'),
(_name, String.Other)
],
'caught-exception': [
(r'all%s' % _break, Keyword, '#pop'),
include('exception')
],
'class/convert-dots': [
include('default'),
(r'(L)((?:%s[/.])*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Class), '#pop')
],
'class/no-dots': [
include('default'),
(r'\[+', Punctuation, ('#pop', 'descriptor/no-dots')),
(r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'((?:%s/)*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Class), '#pop')
],
'descriptor/convert-dots': [
include('default'),
(r'\[+', Punctuation),
(r'(L)((?:%s[/.])*)(%s?)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
default('#pop')
],
'descriptor/no-dots': [
include('default'),
(r'\[+', Punctuation),
(r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
default('#pop')
],
'descriptors/convert-dots': [
(r'\)', Punctuation, '#pop'),
default('descriptor/convert-dots')
],
'enclosing-method': [
(_ws, Text),
(r'(?=[^%s]*\()' % _separator, Text, ('#pop', 'invocation')),
default(('#pop', 'class/convert-dots'))
],
'exception': [
include('default'),
(r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Exception), '#pop')
],
'field': [
(r'static%s' % _break, Keyword.Reserved, ('#pop', 'static')),
include('default'),
(r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Variable.Instance),
'#pop')
],
'invocation': [
include('default'),
(r'((?:%s[/.](?=[^%s(]*[/.]))*)(%s[/.])?(%s)(\()' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Function, Punctuation),
('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
'descriptor/convert-dots'))
],
'label': [
include('default'),
(_name, Name.Label, '#pop')
],
'method': [
include('default'),
(r'(%s)(\()' % _name, bygroups(Name.Function, Punctuation),
('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
'descriptor/convert-dots'))
],
'no-verification': [
(r'(locals|method|stack)%s' % _break, Keyword.Reserved, '#pop'),
include('default')
],
'static': [
include('default'),
(r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Variable.Class), '#pop')
],
'table': [
(r'\n+', Text),
(r'default%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
(_name, Name.Label)
],
'var': [
include('default'),
(_name, Name.Variable, '#pop')
],
'verification': [
include('default'),
(r'(Double|Float|Integer|Long|Null|Top|UninitializedThis)%s' %
_break, Keyword, '#pop'),
(r'Object%s' % _break, Keyword, ('#pop', 'class/no-dots')),
(r'Uninitialized%s' % _break, Keyword, ('#pop', 'label'))
]
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'^\s*[a-z]+_[a-z]+\b', text, re.MULTILINE):
score += 0.3
if re.search(r'^\s*\.(attribute|bytecode|debug|deprecated|enclosing|'
r'inner|interface|limit|set|signature|stack)\b', text,
re.MULTILINE):
score += 0.6
return score
| mit |
tomchristie/django-rest-framework | tests/test_parsers.py | 5 | 6745 | import io
import math
import pytest
from django import forms
from django.core.files.uploadhandler import (
MemoryFileUploadHandler, TemporaryFileUploadHandler
)
from django.http.request import RawPostDataException
from django.test import TestCase
from rest_framework.exceptions import ParseError
from rest_framework.parsers import (
FileUploadParser, FormParser, JSONParser, MultiPartParser
)
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
class Form(forms.Form):
field1 = forms.CharField(max_length=3)
field2 = forms.CharField()
class TestFormParser(TestCase):
def setUp(self):
self.string = "field1=abc&field2=defghijk"
def test_parse(self):
""" Make sure the `QueryDict` works OK """
parser = FormParser()
stream = io.StringIO(self.string)
data = parser.parse(stream)
assert Form(data).is_valid() is True
class TestFileUploadParser(TestCase):
def setUp(self):
class MockRequest:
pass
self.stream = io.BytesIO(b"Test text file")
request = MockRequest()
request.upload_handlers = (MemoryFileUploadHandler(),)
request.META = {
'HTTP_CONTENT_DISPOSITION': 'Content-Disposition: inline; filename=file.txt',
'HTTP_CONTENT_LENGTH': 14,
}
self.parser_context = {'request': request, 'kwargs': {}}
def test_parse(self):
"""
Parse raw file upload.
"""
parser = FileUploadParser()
self.stream.seek(0)
data_and_files = parser.parse(self.stream, None, self.parser_context)
file_obj = data_and_files.files['file']
assert file_obj.size == 14
def test_parse_missing_filename(self):
"""
Parse raw file upload when filename is missing.
"""
parser = FileUploadParser()
self.stream.seek(0)
self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = ''
with pytest.raises(ParseError) as excinfo:
parser.parse(self.stream, None, self.parser_context)
assert str(excinfo.value) == 'Missing filename. Request should include a Content-Disposition header with a filename parameter.'
def test_parse_missing_filename_multiple_upload_handlers(self):
"""
Parse raw file upload with multiple handlers when filename is missing.
Regression test for #2109.
"""
parser = FileUploadParser()
self.stream.seek(0)
self.parser_context['request'].upload_handlers = (
MemoryFileUploadHandler(),
MemoryFileUploadHandler()
)
self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = ''
with pytest.raises(ParseError) as excinfo:
parser.parse(self.stream, None, self.parser_context)
assert str(excinfo.value) == 'Missing filename. Request should include a Content-Disposition header with a filename parameter.'
def test_parse_missing_filename_large_file(self):
"""
Parse raw file upload when filename is missing with TemporaryFileUploadHandler.
"""
parser = FileUploadParser()
self.stream.seek(0)
self.parser_context['request'].upload_handlers = (
TemporaryFileUploadHandler(),
)
self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = ''
with pytest.raises(ParseError) as excinfo:
parser.parse(self.stream, None, self.parser_context)
assert str(excinfo.value) == 'Missing filename. Request should include a Content-Disposition header with a filename parameter.'
def test_get_filename(self):
parser = FileUploadParser()
filename = parser.get_filename(self.stream, None, self.parser_context)
assert filename == 'file.txt'
def test_get_encoded_filename(self):
parser = FileUploadParser()
self.__replace_content_disposition('inline; filename*=utf-8\'\'ÀĥƦ.txt')
filename = parser.get_filename(self.stream, None, self.parser_context)
assert filename == 'ÀĥƦ.txt'
self.__replace_content_disposition('inline; filename=fallback.txt; filename*=utf-8\'\'ÀĥƦ.txt')
filename = parser.get_filename(self.stream, None, self.parser_context)
assert filename == 'ÀĥƦ.txt'
self.__replace_content_disposition('inline; filename=fallback.txt; filename*=utf-8\'en-us\'ÀĥƦ.txt')
filename = parser.get_filename(self.stream, None, self.parser_context)
assert filename == 'ÀĥƦ.txt'
def __replace_content_disposition(self, disposition):
self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = disposition
class TestJSONParser(TestCase):
def bytes(self, value):
return io.BytesIO(value.encode())
def test_float_strictness(self):
parser = JSONParser()
# Default to strict
for value in ['Infinity', '-Infinity', 'NaN']:
with pytest.raises(ParseError):
parser.parse(self.bytes(value))
parser.strict = False
assert parser.parse(self.bytes('Infinity')) == float('inf')
assert parser.parse(self.bytes('-Infinity')) == float('-inf')
assert math.isnan(parser.parse(self.bytes('NaN')))
class TestPOSTAccessed(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def test_post_accessed_in_post_method(self):
django_request = self.factory.post('/', {'foo': 'bar'})
request = Request(django_request, parsers=[FormParser(), MultiPartParser()])
django_request.POST
assert request.POST == {'foo': ['bar']}
assert request.data == {'foo': ['bar']}
def test_post_accessed_in_post_method_with_json_parser(self):
django_request = self.factory.post('/', {'foo': 'bar'})
request = Request(django_request, parsers=[JSONParser()])
django_request.POST
assert request.POST == {}
assert request.data == {}
def test_post_accessed_in_put_method(self):
django_request = self.factory.put('/', {'foo': 'bar'})
request = Request(django_request, parsers=[FormParser(), MultiPartParser()])
django_request.POST
assert request.POST == {'foo': ['bar']}
assert request.data == {'foo': ['bar']}
def test_request_read_before_parsing(self):
django_request = self.factory.put('/', {'foo': 'bar'})
request = Request(django_request, parsers=[FormParser(), MultiPartParser()])
django_request.read()
with pytest.raises(RawPostDataException):
request.POST
        with pytest.raises(RawPostDataException):
            request.data
| bsd-2-clause |
cowlicks/numpy | tools/cythonize.py | 86 | 6159 | #!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'numpy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files, while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'numpy'
VENDOR = 'NumPy'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
try:
from Cython.Compiler.Version import version as cython_version
from distutils.version import LooseVersion
if LooseVersion(cython_version) < LooseVersion('0.19'):
raise Exception('Building %s requires Cython >= 0.19' % VENDOR)
except ImportError:
pass
flags = ['--fast-fail']
if tofile.endswith('.cxx'):
flags += ['--cplus']
try:
try:
r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
# There are ways of installing Cython that don't result in a cython
# executable on the path, see gh-2397.
r = subprocess.call([sys.executable, '-c',
'import sys; from Cython.Compiler.Main import '
'setuptools_main as main; sys.exit(main())'] + flags +
["-o", tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise Exception('Building %s requires Tempita: '
'pip install --user Tempita' % VENDOR)
with open(fromfile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
assert fromfile.endswith('.pyx.in')
pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
with open(pyxfile, "w") as f:
f.write(pyxcontent)
process_pyx(pyxfile, tofile)
rules = {
# fromext : function
'.pyx' : process_pyx,
'.pyx.in' : process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
# Return { filename : (sha1 of input, sha1 of output) }
if os.path.isfile(filename):
hashes = {}
with open(filename, 'r') as f:
for line in f:
filename, inhash, outhash = line.split()
hashes[filename] = (inhash, outhash)
else:
hashes = {}
return hashes
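# The hash file is plain text, one whitespace-separated record per line,
# e.g. 'numpy/random/mtrand/mtrand.pyx <in-sha1> <out-sha1>' (illustrative).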
def save_hashes(hash_db, filename):
with open(filename, 'w') as f:
for key, value in sorted(hash_db.items()):
f.write("%s %s %s\n" % (key, value[0], value[1]))
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
#
# Main program
#
def normpath(path):
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
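# e.g. normpath('./numpy/random') -> 'numpy/random' (illustrative)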
def get_hash(frompath, topath):
from_hash = sha1_of_file(frompath)
to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
fullfrompath = os.path.join(path, fromfile)
fulltopath = os.path.join(path, tofile)
current_hash = get_hash(fullfrompath, fulltopath)
if current_hash == hash_db.get(normpath(fullfrompath), None):
print('%s has not changed' % fullfrompath)
return
orig_cwd = os.getcwd()
try:
os.chdir(path)
print('Processing %s' % fullfrompath)
processor_function(fromfile, tofile)
finally:
os.chdir(orig_cwd)
# changed target file, recompute hash
current_hash = get_hash(fullfrompath, fulltopath)
# store hash in db
hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
in_file = os.path.join(cur_dir, filename + ".in")
if filename.endswith('.pyx') and os.path.isfile(in_file):
continue
for fromext, function in rules.items():
if filename.endswith(fromext):
toext = ".c"
with open(os.path.join(cur_dir, filename), 'rb') as f:
data = f.read()
m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
if m:
toext = ".cxx"
fromfile = filename
tofile = filename[:-len(fromext)] + toext
process(cur_dir, fromfile, tofile, function, hash_db)
save_hashes(hash_db, HASH_FILE)
def main():
try:
root_dir = sys.argv[1]
except IndexError:
root_dir = DEFAULT_ROOT
find_process_files(root_dir)
if __name__ == '__main__':
main()
| bsd-3-clause |
moniqx4/bite-project | deps/gdata-python-client/src/gdata/tlslite/constants.py | 279 | 7476 | """Constants used in various places."""
class CertificateType:
x509 = 0
openpgp = 1
cryptoID = 2
class HandshakeType:
hello_request = 0
client_hello = 1
server_hello = 2
certificate = 11
server_key_exchange = 12
certificate_request = 13
server_hello_done = 14
certificate_verify = 15
client_key_exchange = 16
finished = 20
class ContentType:
change_cipher_spec = 20
alert = 21
handshake = 22
application_data = 23
all = (20,21,22,23)
class AlertLevel:
warning = 1
fatal = 2
class AlertDescription:
"""
@cvar bad_record_mac: A TLS record failed to decrypt properly.
If this occurs during a shared-key or SRP handshake it most likely
indicates a bad password. It may also indicate an implementation
error, or some tampering with the data in transit.
This alert will be signalled by the server if the SRP password is bad. It
may also be signalled by the server if the SRP username is unknown to the
server, but it doesn't wish to reveal that fact.
This alert will be signalled by the client if the shared-key username is
bad.
@cvar handshake_failure: A problem occurred while handshaking.
This typically indicates a lack of common ciphersuites between client and
server, or some other disagreement (about SRP parameters or key sizes,
for example).
@cvar protocol_version: The other party's SSL/TLS version was unacceptable.
This indicates that the client and server couldn't agree on which version
of SSL or TLS to use.
@cvar user_canceled: The handshake is being cancelled for some reason.
"""
close_notify = 0
unexpected_message = 10
bad_record_mac = 20
decryption_failed = 21
record_overflow = 22
decompression_failure = 30
handshake_failure = 40
no_certificate = 41 #SSLv3
bad_certificate = 42
unsupported_certificate = 43
certificate_revoked = 44
certificate_expired = 45
certificate_unknown = 46
illegal_parameter = 47
unknown_ca = 48
access_denied = 49
decode_error = 50
decrypt_error = 51
export_restriction = 60
protocol_version = 70
insufficient_security = 71
internal_error = 80
user_canceled = 90
no_renegotiation = 100
unknown_srp_username = 120
missing_srp_username = 121
untrusted_srp_parameters = 122
class CipherSuite:
TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0x0050
TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0x0053
TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0x0056
TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0x0051
TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0x0054
TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0x0057
TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
TLS_RSA_WITH_RC4_128_SHA = 0x0005
srpSuites = []
srpSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
srpSuites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
srpSuites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
def getSrpSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
return suites
getSrpSuites = staticmethod(getSrpSuites)
srpRsaSuites = []
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
def getSrpRsaSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
return suites
getSrpRsaSuites = staticmethod(getSrpRsaSuites)
rsaSuites = []
rsaSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_AES_128_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_AES_256_CBC_SHA)
rsaSuites.append(TLS_RSA_WITH_RC4_128_SHA)
def getRsaSuites(ciphers):
suites = []
for cipher in ciphers:
if cipher == "aes128":
suites.append(CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA)
elif cipher == "aes256":
suites.append(CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA)
elif cipher == "rc4":
suites.append(CipherSuite.TLS_RSA_WITH_RC4_128_SHA)
elif cipher == "3des":
suites.append(CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA)
return suites
getRsaSuites = staticmethod(getRsaSuites)
tripleDESSuites = []
tripleDESSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA)
tripleDESSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA)
tripleDESSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA)
aes128Suites = []
aes128Suites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA)
aes128Suites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA)
aes128Suites.append(TLS_RSA_WITH_AES_128_CBC_SHA)
aes256Suites = []
aes256Suites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA)
aes256Suites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA)
aes256Suites.append(TLS_RSA_WITH_AES_256_CBC_SHA)
rc4Suites = []
rc4Suites.append(TLS_RSA_WITH_RC4_128_SHA)
class Fault:
badUsername = 101
badPassword = 102
badA = 103
clientSrpFaults = range(101,104)
badVerifyMessage = 601
clientCertFaults = range(601,602)
badPremasterPadding = 501
shortPremasterSecret = 502
clientNoAuthFaults = range(501,503)
badIdentifier = 401
badSharedKey = 402
clientSharedKeyFaults = range(401,403)
badB = 201
serverFaults = range(201,202)
badFinished = 300
badMAC = 301
badPadding = 302
genericFaults = range(300,303)
faultAlerts = {\
badUsername: (AlertDescription.unknown_srp_username, \
AlertDescription.bad_record_mac),\
badPassword: (AlertDescription.bad_record_mac,),\
badA: (AlertDescription.illegal_parameter,),\
badIdentifier: (AlertDescription.handshake_failure,),\
badSharedKey: (AlertDescription.bad_record_mac,),\
badPremasterPadding: (AlertDescription.bad_record_mac,),\
shortPremasterSecret: (AlertDescription.bad_record_mac,),\
badVerifyMessage: (AlertDescription.decrypt_error,),\
badFinished: (AlertDescription.decrypt_error,),\
badMAC: (AlertDescription.bad_record_mac,),\
badPadding: (AlertDescription.bad_record_mac,)
}
faultNames = {\
badUsername: "bad username",\
badPassword: "bad password",\
badA: "bad A",\
badIdentifier: "bad identifier",\
badSharedKey: "bad sharedkey",\
badPremasterPadding: "bad premaster padding",\
shortPremasterSecret: "short premaster secret",\
badVerifyMessage: "bad verify message",\
badFinished: "bad finished message",\
badMAC: "bad MAC",\
badPadding: "bad padding"
}
| apache-2.0 |
Tan0/ironic | ironic/api/controllers/v1/types.py | 1 | 8457 | # coding: utf-8
#
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_utils import strutils
from oslo_utils import uuidutils
import six
import wsme
from wsme import types as wtypes
from ironic.api.controllers.v1 import utils as v1_utils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import utils
class MacAddressType(wtypes.UserType):
"""A simple MAC address type."""
basetype = wtypes.text
name = 'macaddress'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
# to get the name of the type by accessing it's __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
return utils.validate_and_normalize_mac(value)
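    # e.g. validate('AA:BB:CC:DD:EE:0F') -> 'aa:bb:cc:dd:ee:0f'; invalid
    # values are rejected by the helper above (illustrative).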
@staticmethod
def frombasetype(value):
if value is None:
return None
return MacAddressType.validate(value)
class UuidOrNameType(wtypes.UserType):
"""A simple UUID or logical name type."""
basetype = wtypes.text
name = 'uuid_or_name'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
# to get the name of the type by accessing it's __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
if not (uuidutils.is_uuid_like(value)
or v1_utils.is_valid_logical_name(value)):
raise exception.InvalidUuidOrName(name=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return UuidOrNameType.validate(value)
class NameType(wtypes.UserType):
"""A simple logical name type."""
basetype = wtypes.text
name = 'name'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
# to get the name of the type by accessing it's __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
if not v1_utils.is_valid_logical_name(value):
raise exception.InvalidName(name=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return NameType.validate(value)
class UuidType(wtypes.UserType):
"""A simple UUID type."""
basetype = wtypes.text
name = 'uuid'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
# to get the name of the type by accessing it's __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
if not uuidutils.is_uuid_like(value):
raise exception.InvalidUUID(uuid=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return UuidType.validate(value)
class BooleanType(wtypes.UserType):
"""A simple boolean type."""
basetype = wtypes.text
name = 'boolean'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
# to get the name of the type by accessing it's __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
try:
return strutils.bool_from_string(value, strict=True)
except ValueError as e:
# raise Invalid to return 400 (BadRequest) in the API
raise exception.Invalid(e)
@staticmethod
def frombasetype(value):
if value is None:
return None
return BooleanType.validate(value)
class JsonType(wtypes.UserType):
"""A simple JSON type."""
basetype = wtypes.text
name = 'json'
# FIXME(lucasagomes): When used with the wsexpose decorator, WSME will try
# to get the name of the type by accessing its __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
def __str__(self):
# These are the JSON-serializable native types.
return ' | '.join(map(str, (wtypes.text, six.integer_types, float,
BooleanType, list, dict, None)))
@staticmethod
def validate(value):
try:
json.dumps(value)
except TypeError:
raise exception.Invalid(_('%s is not JSON serializable') % value)
else:
return value
@staticmethod
def frombasetype(value):
return JsonType.validate(value)
class ListType(wtypes.UserType):
"""A simple list type."""
basetype = wtypes.text
name = 'list'
# FIXME(lucasagomes): When used with the wsexpose decorator, WSME will try
# to get the name of the type by accessing its __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
"""Validate and convert the input to a ListType.
:param value: A comma-separated string of values.
:returns: A set of unique, lower-cased values (empty items removed).
"""
items = [v.strip().lower() for v in six.text_type(value).split(',')]
# filter() to remove empty items
# set() to remove duplicated items
return set(filter(None, items))
@staticmethod
def frombasetype(value):
if value is None:
return None
return ListType.validate(value)
macaddress = MacAddressType()
uuid_or_name = UuidOrNameType()
name = NameType()
uuid = UuidType()
boolean = BooleanType()
listtype = ListType()
# Can't call it 'json' because that's the name of the stdlib module
jsontype = JsonType()
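# Editor's note: an illustrative, runnable sketch (not part of the original
# module) showing how the singleton types above behave; the input values are
# made up, and the expected results assume ironic's usual validation helpers.
if __name__ == '__main__':
    # MAC addresses are validated and normalized to lower case.
    assert macaddress.validate('AA:BB:CC:DD:EE:FF') == 'aa:bb:cc:dd:ee:ff'
    # Booleans accept the usual string spellings, strictly.
    assert boolean.validate('yes') is True
    # Comma-separated lists are lower-cased, de-duplicated, and returned
    # as a set with empty items dropped.
    assert listtype.validate('CPU, memory,,cpu') == {'cpu', 'memory'}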
class JsonPatchType(wtypes.Base):
"""A complex type that represents a single json-patch operation."""
path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'),
mandatory=True)
op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
mandatory=True)
value = wsme.wsattr(jsontype, default=wtypes.Unset)
@staticmethod
def internal_attrs():
"""Returns a list of internal attributes.
Internal attributes can't be added, replaced or removed. This
method may be overridden by derived classes.
"""
return ['/created_at', '/id', '/links', '/updated_at', '/uuid']
@staticmethod
def mandatory_attrs():
"""Retruns a list of mandatory attributes.
Mandatory attributes can't be removed from the document. This
method should be overwritten by derived class.
"""
return []
@staticmethod
def validate(patch):
_path = '/' + patch.path.split('/')[1]
if _path in patch.internal_attrs():
msg = _("'%s' is an internal attribute and can not be updated")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.path in patch.mandatory_attrs() and patch.op == 'remove':
msg = _("'%s' is a mandatory attribute and can not be removed")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.op != 'remove':
if patch.value is wsme.Unset:
msg = _("'add' and 'replace' operations needs value")
raise wsme.exc.ClientSideError(msg)
ret = {'path': patch.path, 'op': patch.op}
if patch.value is not wsme.Unset:
ret['value'] = patch.value
return ret
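# Editor's note: an illustrative sketch (not part of the original module) of
# how JsonPatchType.validate() is typically exercised; the paths and values
# below are hypothetical.
#
#     patch = JsonPatchType(path='/extra/foo', op='replace', value='bar')
#     JsonPatchType.validate(patch)
#     # -> {'path': '/extra/foo', 'op': 'replace', 'value': 'bar'}
#
#     patch = JsonPatchType(path='/uuid', op='replace', value='...')
#     JsonPatchType.validate(patch)  # raises ClientSideError: internal attr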
| apache-2.0 |
secretdataz/OpenKore-Src | src/scons-local-2.0.1/SCons/Tool/suncc.py | 61 | 1980 | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
return env.Detect('CC')
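# Editor's note: an illustrative sketch (not part of the original tool) of
# how this tool is selected from an SConstruct file, where ``Environment``
# is provided by SCons:
#
#     env = Environment(tools=['suncc'])
#     env.SharedObject('foo.c')  # compiled with -KPIC, emitted as so_foo.o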
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
giggsey/SickRage | lib/sqlalchemy/sql/operators.py | 78 | 21953 | # sql/operators.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines operators used in SQL expressions."""
from .. import util
from operator import (
and_, or_, inv, add, mul, sub, mod, truediv, lt, le, ne, gt, ge, eq, neg,
getitem, lshift, rshift
)
if util.py2k:
from operator import div
else:
div = truediv
class Operators(object):
"""Base of comparison and logical operators.
Implements base methods :meth:`~sqlalchemy.sql.operators.Operators.operate` and
:meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as
:meth:`~sqlalchemy.sql.operators.Operators.__and__`,
:meth:`~sqlalchemy.sql.operators.Operators.__or__`,
:meth:`~sqlalchemy.sql.operators.Operators.__invert__`.
Usually used via its most common subclass
:class:`.ColumnOperators`.
"""
def __and__(self, other):
"""Implement the ``&`` operator.
When used with SQL expressions, results in an
AND operation, equivalent to
:func:`~.expression.and_`, that is::
a & b
is equivalent to::
from sqlalchemy import and_
and_(a, b)
Care should be taken when using ``&`` regarding
operator precedence; the ``&`` operator binds more tightly than
comparison operators such as ``==``. The operands should be enclosed
in parentheses if they contain further sub-expressions::
(a == 2) & (b == 4)
"""
return self.operate(and_, other)
def __or__(self, other):
"""Implement the ``|`` operator.
When used with SQL expressions, results in an
OR operation, equivalent to
:func:`~.expression.or_`, that is::
a | b
is equivalent to::
from sqlalchemy import or_
or_(a, b)
Care should be taken when using ``|`` regarding
operator precedence; the ``|`` operator binds more tightly than
comparison operators such as ``==``. The operands should be enclosed
in parentheses if they contain further sub-expressions::
(a == 2) | (b == 4)
"""
return self.operate(or_, other)
def __invert__(self):
"""Implement the ``~`` operator.
When used with SQL expressions, results in a
NOT operation, equivalent to
:func:`~.expression.not_`, that is::
~a
is equivalent to::
from sqlalchemy import not_
not_(a)
"""
return self.operate(inv)
def op(self, opstring, precedence=0, is_comparison=False):
"""produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in ``somecolumn``.
:param opstring: a string which will be output as the infix operator
between this element and the expression passed to the
generated function.
:param precedence: precedence to apply to the operator, when
parenthesizing expressions. A lower number will cause the expression
to be parenthesized when applied against another operator with
higher precedence. The default value of ``0`` is lower than all
operators except for the comma (``,``) and ``AS`` operators.
A value of 100 will be higher than or equal to all operators, and -100
will be lower than or equal to all operators.
.. versionadded:: 0.8 - added the 'precedence' argument.
:param is_comparison: if True, the operator will be considered as a
"comparison" operator, that is which evaulates to a boolean true/false
value, like ``==``, ``>``, etc. This flag should be set so that
ORM relationships can establish that the operator is a comparison
operator when used in a custom join condition.
.. versionadded:: 0.9.2 - added the :paramref:`.Operators.op.is_comparison`
flag.
.. seealso::
:ref:`types_operators`
:ref:`relationship_custom_operator`
"""
operator = custom_op(opstring, precedence, is_comparison)
def against(other):
return operator(self, other)
return against
def operate(self, op, *other, **kwargs):
"""Operate on an argument.
This is the lowest level of operation, raises
:class:`NotImplementedError` by default.
Overriding this on a subclass can allow common
behavior to be applied to all operations.
For example, overriding :class:`.ColumnOperators`
to apply ``func.lower()`` to the left and right
side::
class MyComparator(ColumnOperators):
def operate(self, op, other):
return op(func.lower(self), func.lower(other))
:param op: Operator callable.
:param \*other: the 'other' side of the operation. Will
be a single scalar for most operations.
:param \**kwargs: modifiers. These may be passed by special
operators such as :meth:`ColumnOperators.contains`.
"""
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
"""Reverse operate on an argument.
Usage is the same as :meth:`operate`.
"""
raise NotImplementedError(str(op))
class custom_op(object):
"""Represent a 'custom' operator.
:class:`.custom_op` is normally instantiated when the
:meth:`.ColumnOperators.op` method is used to create a
custom operator callable. The class can also be used directly
when programmatically constructing expressions. E.g.
to represent the "factorial" operation::
from sqlalchemy.sql import UnaryExpression
from sqlalchemy.sql import operators
from sqlalchemy import Numeric
unary = UnaryExpression(table.c.somecolumn,
modifier=operators.custom_op("!"),
type_=Numeric)
"""
__name__ = 'custom_op'
def __init__(self, opstring, precedence=0, is_comparison=False):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
def __eq__(self, other):
return isinstance(other, custom_op) and \
other.opstring == self.opstring
def __hash__(self):
return id(self)
def __call__(self, left, right, **kw):
return left.operate(self, right, **kw)
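# Editor's note: an illustrative sketch (not part of the original module).
# ``custom_op`` is what :meth:`.ColumnOperators.op` constructs under the
# hood, so the two spellings below are equivalent; ``some_column`` is a
# hypothetical column expression:
#
#     some_column.op('&', precedence=8)(0xff)          # public API
#     custom_op('&', precedence=8)(some_column, 0xff)  # spelled directly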
class ColumnOperators(Operators):
"""Defines boolean, comparison, and other operators for
:class:`.ColumnElement` expressions.
By default, all methods call down to
:meth:`.operate` or :meth:`.reverse_operate`,
passing in the appropriate operator function from the
Python builtin ``operator`` module or
a SQLAlchemy-specific operator function from
:mod:`sqlalchemy.expression.operators`. For example
the ``__eq__`` function::
def __eq__(self, other):
return self.operate(operators.eq, other)
Where ``operators.eq`` is essentially::
def eq(a, b):
return a == b
The core column expression unit :class:`.ColumnElement`
overrides :meth:`.Operators.operate` and others
to return further :class:`.ColumnElement` constructs,
so that the ``==`` operation above is replaced by a clause
construct.
See also:
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
:class:`.ColumnOperators`
:class:`.PropComparator`
"""
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
"""Implement the ``<`` operator.
In a column context, produces the clause ``a < b``.
"""
return self.operate(lt, other)
def __le__(self, other):
"""Implement the ``<=`` operator.
In a column context, produces the clause ``a <= b``.
"""
return self.operate(le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
"""Implement the ``==`` operator.
In a column context, produces the clause ``a = b``.
If the target is ``None``, produces ``a IS NULL``.
"""
return self.operate(eq, other)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a column context, produces the clause ``a != b``.
If the target is ``None``, produces ``a IS NOT NULL``.
"""
return self.operate(ne, other)
def __gt__(self, other):
"""Implement the ``>`` operator.
In a column context, produces the clause ``a > b``.
"""
return self.operate(gt, other)
def __ge__(self, other):
"""Implement the ``>=`` operator.
In a column context, produces the clause ``a >= b``.
"""
return self.operate(ge, other)
def __neg__(self):
"""Implement the ``-`` operator.
In a column context, produces the clause ``-a``.
"""
return self.operate(neg)
def __getitem__(self, index):
"""Implement the [] operator.
This can be used by some database-specific types
such as Postgresql ARRAY and HSTORE.
"""
return self.operate(getitem, index)
def __lshift__(self, other):
"""implement the << operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
<< as an extension point.
"""
return self.operate(lshift, other)
def __rshift__(self, other):
"""implement the >> operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
>> as an extension point.
"""
return self.operate(rshift, other)
def concat(self, other):
"""Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
"""
return self.operate(concat_op, other)
def like(self, other, escape=None):
"""Implement the ``like`` operator.
In a column context, produces the clause ``a LIKE other``.
E.g.::
select([sometable]).where(sometable.c.column.like("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.like("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(like_op, other, escape=escape)
def ilike(self, other, escape=None):
"""Implement the ``ilike`` operator.
In a column context, produces the clause ``a ILIKE other``.
E.g.::
select([sometable]).where(sometable.c.column.ilike("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.ilike("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(ilike_op, other, escape=escape)
def in_(self, other):
"""Implement the ``in`` operator.
In a column context, produces the clause ``a IN other``.
"other" may be a tuple/list of column expressions,
or a :func:`~.expression.select` construct.
"""
return self.operate(in_op, other)
def notin_(self, other):
"""implement the ``NOT IN`` operator.
This is equivalent to using negation with :meth:`.ColumnOperators.in_`,
i.e. ``~x.in_(y)``.
.. versionadded:: 0.8
.. seealso::
:meth:`.ColumnOperators.in_`
"""
return self.operate(notin_op, other)
def notlike(self, other, escape=None):
"""implement the ``NOT LIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``.
.. versionadded:: 0.8
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(notlike_op, other, escape=escape)
def notilike(self, other, escape=None):
"""implement the ``NOT ILIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``.
.. versionadded:: 0.8
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(notilike_op, other, escape=escape)
def is_(self, other):
"""Implement the ``IS`` operator.
Normally, ``IS`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS`` may be desirable if comparing to boolean values
on certain platforms.
.. versionadded:: 0.7.9
.. seealso:: :meth:`.ColumnOperators.isnot`
"""
return self.operate(is_, other)
def isnot(self, other):
"""Implement the ``IS NOT`` operator.
Normally, ``IS NOT`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS NOT`` may be desirable if comparing to boolean values
on certain platforms.
.. versionadded:: 0.7.9
.. seealso:: :meth:`.ColumnOperators.is_`
"""
return self.operate(isnot, other)
def startswith(self, other, **kwargs):
"""Implement the ``startwith`` operator.
In a column context, produces the clause ``LIKE '<other>%'``
"""
return self.operate(startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
"""Implement the 'endswith' operator.
In a column context, produces the clause ``LIKE '%<other>'``
"""
return self.operate(endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
"""Implement the 'contains' operator.
In a column context, produces the clause ``LIKE '%<other>%'``
"""
return self.operate(contains_op, other, **kwargs)
def match(self, other, **kwargs):
"""Implements the 'match' operator.
In a column context, this produces a MATCH clause, i.e.
``MATCH '<other>'``. The allowed contents of ``other``
are database backend specific.
"""
return self.operate(match_op, other, **kwargs)
def desc(self):
"""Produce a :func:`~.expression.desc` clause against the
parent object."""
return self.operate(desc_op)
def asc(self):
"""Produce a :func:`~.expression.asc` clause against the
parent object."""
return self.operate(asc_op)
def nullsfirst(self):
"""Produce a :func:`~.expression.nullsfirst` clause against the
parent object."""
return self.operate(nullsfirst_op)
def nullslast(self):
"""Produce a :func:`~.expression.nullslast` clause against the
parent object."""
return self.operate(nullslast_op)
def collate(self, collation):
"""Produce a :func:`~.expression.collate` clause against
the parent object, given the collation string."""
return self.operate(collate, collation)
def __radd__(self, other):
"""Implement the ``+`` operator in reverse.
See :meth:`.ColumnOperators.__add__`.
"""
return self.reverse_operate(add, other)
def __rsub__(self, other):
"""Implement the ``-`` operator in reverse.
See :meth:`.ColumnOperators.__sub__`.
"""
return self.reverse_operate(sub, other)
def __rmul__(self, other):
"""Implement the ``*`` operator in reverse.
See :meth:`.ColumnOperators.__mul__`.
"""
return self.reverse_operate(mul, other)
def __rdiv__(self, other):
"""Implement the ``/`` operator in reverse.
See :meth:`.ColumnOperators.__div__`.
"""
return self.reverse_operate(div, other)
def between(self, cleft, cright):
"""Produce a :func:`~.expression.between` clause against
the parent object, given the lower and upper range."""
return self.operate(between_op, cleft, cright)
def distinct(self):
"""Produce a :func:`~.expression.distinct` clause against the
parent object.
"""
return self.operate(distinct_op)
def __add__(self, other):
"""Implement the ``+`` operator.
In a column context, produces the clause ``a + b``
if the parent object has non-string affinity.
If the parent object has a string affinity,
produces the concatenation operator, ``a || b`` -
see :meth:`.ColumnOperators.concat`.
"""
return self.operate(add, other)
def __sub__(self, other):
"""Implement the ``-`` operator.
In a column context, produces the clause ``a - b``.
"""
return self.operate(sub, other)
def __mul__(self, other):
"""Implement the ``*`` operator.
In a column context, produces the clause ``a * b``.
"""
return self.operate(mul, other)
def __div__(self, other):
"""Implement the ``/`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(div, other)
def __mod__(self, other):
"""Implement the ``%`` operator.
In a column context, produces the clause ``a % b``.
"""
return self.operate(mod, other)
def __truediv__(self, other):
"""Implement the ``//`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(truediv, other)
def __rtruediv__(self, other):
"""Implement the ``//`` operator in reverse.
See :meth:`.ColumnOperators.__truediv__`.
"""
return self.reverse_operate(truediv, other)
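# Editor's note: an illustrative sketch (not part of the original module) of
# how the Python operators above render as SQL; ``users`` is a hypothetical
# Table with an ``id`` column:
#
#     >>> print(users.c.id == 7)
#     users.id = :id_1
#     >>> print((users.c.id > 2) & (users.c.id < 5))
#     users.id > :id_1 AND users.id < :id_2
#     >>> print(users.c.id.in_([1, 2, 3]))
#     users.id IN (:id_1, :id_2, :id_3)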
def from_():
raise NotImplementedError()
def as_():
raise NotImplementedError()
def exists():
raise NotImplementedError()
def istrue(a):
raise NotImplementedError()
def isfalse(a):
raise NotImplementedError()
def is_(a, b):
return a.is_(b)
def isnot(a, b):
return a.isnot(b)
def collate(a, b):
return a.collate(b)
def op(a, opstring, b):
return a.op(opstring)(b)
def like_op(a, b, escape=None):
return a.like(b, escape=escape)
def notlike_op(a, b, escape=None):
return a.notlike(b, escape=escape)
def ilike_op(a, b, escape=None):
return a.ilike(b, escape=escape)
def notilike_op(a, b, escape=None):
return a.notilike(b, escape=escape)
def between_op(a, b, c):
return a.between(b, c)
def in_op(a, b):
return a.in_(b)
def notin_op(a, b):
return a.notin_(b)
def distinct_op(a):
return a.distinct()
def startswith_op(a, b, escape=None):
return a.startswith(b, escape=escape)
def notstartswith_op(a, b, escape=None):
return ~a.startswith(b, escape=escape)
def endswith_op(a, b, escape=None):
return a.endswith(b, escape=escape)
def notendswith_op(a, b, escape=None):
return ~a.endswith(b, escape=escape)
def contains_op(a, b, escape=None):
return a.contains(b, escape=escape)
def notcontains_op(a, b, escape=None):
return ~a.contains(b, escape=escape)
def match_op(a, b):
return a.match(b)
def comma_op(a, b):
raise NotImplementedError()
def concat_op(a, b):
return a.concat(b)
def desc_op(a):
return a.desc()
def asc_op(a):
return a.asc()
def nullsfirst_op(a):
return a.nullsfirst()
def nullslast_op(a):
return a.nullslast()
_commutative = set([eq, ne, add, mul])
_comparison = set([eq, ne, lt, gt, ge, le, between_op])
def is_comparison(op):
return op in _comparison or \
isinstance(op, custom_op) and op.is_comparison
def is_commutative(op):
return op in _commutative
def is_ordering_modifier(op):
return op in (asc_op, desc_op,
nullsfirst_op, nullslast_op)
_associative = _commutative.union([concat_op, and_, or_])
_natural_self_precedent = _associative.union([getitem])
"""Operators where if we have (a op b) op c, we don't want to
parenthesize (a op b).
"""
_asbool = util.symbol('_asbool', canonical=-10)
_smallest = util.symbol('_smallest', canonical=-100)
_largest = util.symbol('_largest', canonical=100)
_PRECEDENCE = {
from_: 15,
getitem: 15,
mul: 8,
truediv: 8,
div: 8,
mod: 8,
neg: 8,
add: 7,
sub: 7,
concat_op: 6,
match_op: 6,
ilike_op: 6,
notilike_op: 6,
like_op: 6,
notlike_op: 6,
in_op: 6,
notin_op: 6,
is_: 6,
isnot: 6,
eq: 5,
ne: 5,
gt: 5,
lt: 5,
ge: 5,
le: 5,
between_op: 5,
distinct_op: 5,
inv: 5,
istrue: 5,
isfalse: 5,
and_: 3,
or_: 2,
comma_op: -1,
desc_op: 3,
asc_op: 3,
collate: 4,
as_: -1,
exists: 0,
_asbool: -10,
_smallest: _smallest,
_largest: _largest
}
def is_precedent(operator, against):
if operator is against and operator in _natural_self_precedent:
return False
else:
return (_PRECEDENCE.get(operator,
getattr(operator, 'precedence', _smallest)) <=
_PRECEDENCE.get(against,
getattr(against, 'precedence', _largest)))
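# Editor's note: a small runnable sanity check (not part of the original
# module) illustrating the precedence helpers defined above.
if __name__ == '__main__':
    # a + b must be parenthesized when nested under *, but not vice versa.
    assert is_precedent(add, mul)
    assert not is_precedent(mul, add)
    # and_ is natural-self-precedent: (a AND b) AND c needs no parentheses.
    assert not is_precedent(and_, and_)
    assert is_comparison(eq) and is_commutative(add)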
| gpl-3.0 |
DaVinci789/.emacs.d | elpa/elpy-20160131.118/elpy/tests/support.py | 13 | 29235 | # coding: utf-8
"""Support classes and functions for the elpy test code.
Elpy uses a bit of a peculiar test setup to avoid redundancy. For the
tests of the two backends, we provide generic test cases for generic
tests and for specific callback tests.
These mixins can be included in the actual test classes. We can't add
these tests to a BackendTestCase subclass directly because the test
discovery would find them there and try to run them, which would fail.
"""
import os
import shutil
import sys
import tempfile
import unittest
from elpy.tests import compat
class BackendTestCase(unittest.TestCase):
"""Base class for backend tests.
This class sets up a project root directory and provides an easy
way to create files within the project root.
"""
def setUp(self):
"""Create the project root and make sure it gets cleaned up."""
super(BackendTestCase, self).setUp()
self.project_root = tempfile.mkdtemp(prefix="elpy-test")
self.addCleanup(shutil.rmtree, self.project_root, True)
def project_file(self, relname, contents):
"""Create a file named relname within the project root.
Write contents into that file.
"""
full_name = os.path.join(self.project_root, relname)
try:
os.makedirs(os.path.dirname(full_name))
except OSError:
pass
if compat.PYTHON3:
fobj = open(full_name, "w", encoding="utf-8")
else:
fobj = open(full_name, "w")
with fobj as f:
f.write(contents)
return full_name
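# Editor's note: an illustrative sketch (not part of the original module) of
# how the mixins defined below are combined into a concrete, discoverable
# test case; ``JediBackend`` is a hypothetical stand-in for a real backend:
#
#     class TestJediGetCompletions(RPCGetCompletionsTests, BackendTestCase):
#         def setUp(self):
#             super(TestJediGetCompletions, self).setUp()
#             self.backend = JediBackend(self.project_root)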
class GenericRPCTests(object):
"""Generic RPC test methods.
This is a mixin to add tests that should be run for all RPC
methods that follow the generic (filename, source, offset) calling
conventions.
"""
METHOD = None
def rpc(self, filename, source, offset):
method = getattr(self.backend, self.METHOD)
return method(filename, source, offset)
def test_should_not_fail_on_inexisting_file(self):
filename = self.project_root + "/doesnotexist.py"
self.rpc(filename, "", 0)
def test_should_not_fail_on_empty_file(self):
filename = self.project_file("test.py", "")
self.rpc(filename, "", 0)
def test_should_not_fail_if_file_is_none(self):
self.rpc(None, "", 0)
def test_should_not_fail_for_module_syntax_errors(self):
source, offset = source_and_offset(
"class Foo(object):\n"
" def bar(self):\n"
" foo(_|_"
" bar("
"\n"
" def a(self):\n"
" pass\n"
"\n"
" def b(self):\n"
" pass\n"
"\n"
" def b(self):\n"
" pass\n"
"\n"
" def b(self):\n"
" pass\n"
"\n"
" def b(self):\n"
" pass\n"
"\n"
" def b(self):\n"
" pass\n"
)
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_for_bad_indentation(self):
# Bug in Rope: rope#80
source, offset = source_and_offset(
"def foo():\n"
" print(23)_|_\n"
" print(17)\n")
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
@unittest.skipIf((3, 3) <= sys.version_info < (3, 4),
"Bug in jedi for Python 3.3")
def test_should_not_fail_for_relative_import(self):
# Bug in Rope: rope#81 and rope#82
source, offset = source_and_offset(
"from .. import foo_|_"
)
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_on_keyword(self):
source, offset = source_and_offset(
"_|_try:\n"
" pass\n"
"except:\n"
" pass\n")
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_with_bad_encoding(self):
# Bug in Rope: rope#83
source, offset = source_and_offset(
u'# coding: utf-8X_|_\n'
)
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_with_form_feed_characters(self):
# Bug in Jedi: jedi#424
source, offset = source_and_offset(
"\f\n"
"class Test(object):_|_\n"
" pass"
)
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_for_dictionaries_in_weird_places(self):
# Bug in Jedi: jedi#417
source, offset = source_and_offset(
"import json\n"
"\n"
"def foo():\n"
" json.loads(_|_\n"
"\n"
" json.load.return_value = {'foo': [],\n"
" 'bar': True}\n"
"\n"
" c = Foo()\n"
)
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
def test_should_not_break_with_binary_characters_in_docstring(self):
# Bug in Jedi: jedi#427
template = '''\
class Foo(object):
def __init__(self):
"""
COMMUNITY instance that this conversion belongs to.
DISPERSY_VERSION is the dispersy conversion identifier (on the wire version; must be one byte).
COMMUNIY_VERSION is the community conversion identifier (on the wire version; must be one byte).
COMMUNIY_VERSION may not be '\\x00' or '\\xff'. '\\x00' is used by the DefaultConversion until
a proper conversion instance can be made for the Community. '\\xff' is reserved for when
more than one byte is needed as a version indicator.
"""
pass
x = Foo()
x._|_
'''
source, offset = source_and_offset(template)
filename = self.project_file("test.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_for_def_without_name(self):
# Bug jedi#429
source, offset = source_and_offset(
"def_|_():\n"
" if True:\n"
" return True\n"
" else:\n"
" return False\n"
)
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_on_lambda(self):
# Bug #272 / jedi#431, jedi#572
source, offset = source_and_offset(
"map(lambda_|_"
)
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_on_literals(self):
# Bug #314, #344 / jedi#466
source = u'lit = u"""\\\n# -*- coding: utf-8 -*-\n"""\n'
offset = 0
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_with_args_as_args(self):
# Bug #347 in rope_py3k
source, offset = source_and_offset(
"def my_function(*args):\n"
" ret_|_"
)
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_for_unicode_chars_in_string(self):
# Bug #358 / jedi#482
source = '''\
# coding: utf-8
logging.info(u"Saving «{}»...".format(title))
requests.get(u"https://web.archive.org/save/{}".format(url))
'''
offset = 57
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_for_bad_escape_sequence(self):
# Bug #360 / jedi#485
source = r"v = '\x'"
offset = 8
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_for_coding_declarations_in_strings(self):
# Bug #314 / jedi#465 / python#22221
source = u'lit = """\\\n# -*- coding: utf-8 -*-\n"""'
offset = 8
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_if_root_vanishes(self):
# Bug #353
source, offset = source_and_offset(
"import foo\n"
"foo._|_"
)
filename = self.project_file("project.py", source)
shutil.rmtree(self.project_root)
self.rpc(filename, source, offset)
# For some reason, this breaks a lot of other tests. Couldn't
# figure out why.
#
# def test_should_not_fail_for_sys_path(self):
# # Bug #365 / jedi#486
# source, offset = source_and_offset(
# "import sys\n"
# "\n"
# "sys.path.index(_|_\n"
# )
# filename = self.project_file("project.py", source)
#
# self.rpc(filename, source, offset)
def test_should_not_fail_for_key_error(self):
# Bug #561, #564, #570, #588, #593, #599 / jedi#572, jedi#579,
# jedi#590
source, offset = source_and_offset(
"map(lambda_|_"
)
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_for_badly_defined_global_variable(self):
# Bug #519 / jedi#610
source, offset = source_and_offset(
"""\
def funct1():
global global_dict_var
global_dict_var = dict()
def funct2():
global global_dict_var
q = global_dict_var.copy_|_()
print(q)""")
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
def test_should_not_fail_with_mergednamesdict(self):
# Bug #563 / jedi#589
source, offset = source_and_offset(
u'from email import message_|_'
)
filename = self.project_file("project.py", source)
self.rpc(filename, source, offset)
class RPCGetCompletionsTests(GenericRPCTests):
METHOD = "rpc_get_completions"
def test_should_complete_builtin(self):
source, offset = source_and_offset("o_|_")
expected = ["object", "oct", "open", "or", "ord"]
actual = [cand['name'] for cand in
self.backend.rpc_get_completions("test.py",
source, offset)]
for candidate in expected:
self.assertIn(candidate, actual)
if sys.version_info >= (3, 5):
JSON_COMPLETIONS = ["SONDecoder", "SONEncoder", "SONDecodeError"]
else:
JSON_COMPLETIONS = ["SONDecoder", "SONEncoder"]
def test_should_complete_imports(self):
source, offset = source_and_offset("import json\n"
"json.J_|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
self.assertEqual(
sorted([cand['suffix'] for cand in completions]),
sorted(self.JSON_COMPLETIONS))
def test_should_complete_top_level_modules_for_import(self):
source, offset = source_and_offset("import multi_|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
if compat.PYTHON3:
expected = ["processing"]
else:
expected = ["file", "processing"]
self.assertEqual(sorted([cand['suffix'] for cand in completions]),
sorted(expected))
def test_should_complete_packages_for_import(self):
source, offset = source_and_offset("import email.mi_|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
self.assertEqual([cand['suffix'] for cand in completions],
["me"])
def test_should_not_complete_for_import(self):
source, offset = source_and_offset("import foo.Conf_|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
self.assertEqual([cand['suffix'] for cand in completions],
[])
@unittest.skipIf((3, 3) <= sys.version_info < (3, 4),
"Bug in jedi for Python 3.3")
def test_should_not_fail_for_short_module(self):
source, offset = source_and_offset("from .. import foo_|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
self.assertIsNotNone(completions)
def test_should_complete_sys(self):
source, offset = source_and_offset("import sys\nsys._|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
self.assertIn('path', [cand['suffix'] for cand in completions])
def test_should_find_with_trailing_text(self):
source, offset = source_and_offset(
"import threading\nthreading.T_|_mumble mumble")
expected = ["Thread", "ThreadError", "Timer"]
actual = [cand['name'] for cand in
self.backend.rpc_get_completions("test.py", source, offset)]
for candidate in expected:
self.assertIn(candidate, actual)
def test_should_find_completion_different_package(self):
# See issue #74
self.project_file("project/__init__.py", "")
source1 = ("class Add:\n"
" def add(self, a, b):\n"
" return a + b\n")
self.project_file("project/add.py", source1)
source2, offset = source_and_offset(
"from project.add import Add\n"
"class Calculator:\n"
" def add(self, a, b):\n"
" c = Add()\n"
" c.ad_|_\n")
file2 = self.project_file("project/calculator.py", source2)
proposals = self.backend.rpc_get_completions(file2,
source2,
offset)
self.assertEqual(["add"],
[proposal["name"] for proposal in proposals])
class RPCGetCompletionDocstringTests(object):
def test_should_return_docstring(self):
source, offset = source_and_offset("import json\n"
"json.JSONEnc_|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
completions.sort(key=lambda p: p["name"])
prop = completions[0]
self.assertEqual(prop["name"], "JSONEncoder")
docs = self.backend.rpc_get_completion_docstring("JSONEncoder")
self.assertIn("Extensible JSON", docs)
def test_should_return_none_if_unknown(self):
docs = self.backend.rpc_get_completion_docstring("Foo")
self.assertIsNone(docs)
class RPCGetCompletionLocationTests(object):
def test_should_return_location(self):
source, offset = source_and_offset("donaudampfschiff = 1\n"
"donau_|_")
filename = self.project_file("test.py", source)
completions = self.backend.rpc_get_completions(filename,
source,
offset)
prop = completions[0]
self.assertEqual(prop["name"], "donaudampfschiff")
loc = self.backend.rpc_get_completion_location("donaudampfschiff")
self.assertEqual((filename, 1), loc)
def test_should_return_none_if_unknown(self):
docs = self.backend.rpc_get_completion_location("Foo")
self.assertIsNone(docs)
class RPCGetDefinitionTests(GenericRPCTests):
METHOD = "rpc_get_definition"
def test_should_return_definition_location_same_file(self):
source, offset = source_and_offset("import threading\n"
"def test_function(a, b):\n"
" return a + b\n"
"\n"
"test_func_|_tion(\n")
filename = self.project_file("test.py", source)
location = self.backend.rpc_get_definition(filename,
source,
offset)
self.assertEqual(location[0], filename)
# On def or on the function name
self.assertIn(location[1], (17, 21))
def test_should_return_location_in_same_file_if_not_saved(self):
source, offset = source_and_offset(
"import threading\n"
"\n"
"\n"
"def other_function():\n"
" test_f_|_unction(1, 2)\n"
"\n"
"\n"
"def test_function(a, b):\n"
" return a + b\n")
filename = self.project_file("test.py", "")
location = self.backend.rpc_get_definition(filename,
source,
offset)
self.assertEqual(location[0], filename)
# def or function name
self.assertIn(location[1], (67, 71))
def test_should_return_location_in_different_file(self):
source1 = ("def test_function(a, b):\n"
" return a + b\n")
file1 = self.project_file("test1.py", source1)
source2, offset = source_and_offset("from test1 import test_function\n"
"test_funct_|_ion(1, 2)\n")
file2 = self.project_file("test2.py", source2)
definition = self.backend.rpc_get_definition(file2,
source2,
offset)
self.assertEqual(definition[0], file1)
# Either on the def or on the function name
self.assertIn(definition[1], (0, 4))
def test_should_return_none_if_location_not_found(self):
source, offset = source_and_offset("test_f_|_unction()\n")
filename = self.project_file("test.py", source)
definition = self.backend.rpc_get_definition(filename,
source,
offset)
self.assertIsNone(definition)
def test_should_return_none_if_outside_of_symbol(self):
source, offset = source_and_offset("test_function(_|_)\n")
filename = self.project_file("test.py", source)
definition = self.backend.rpc_get_definition(filename,
source,
offset)
self.assertIsNone(definition)
def test_should_return_definition_location_different_package(self):
# See issue #74
self.project_file("project/__init__.py", "")
source1 = ("class Add:\n"
" def add(self, a, b):\n"
" return a + b\n")
file1 = self.project_file("project/add.py", source1)
source2, offset = source_and_offset(
"from project.add import Add\n"
"class Calculator:\n"
" def add(self, a, b):\n"
" return Add_|_().add(a, b)\n")
file2 = self.project_file("project/calculator.py", source2)
location = self.backend.rpc_get_definition(file2,
source2,
offset)
self.assertEqual(location[0], file1)
# class or class name
self.assertIn(location[1], (0, 6))
def test_should_find_variable_definition(self):
source, offset = source_and_offset("SOME_VALUE = 1\n"
"\n"
"variable = _|_SOME_VALUE\n")
filename = self.project_file("test.py", source)
self.assertEqual(self.backend.rpc_get_definition(filename,
source,
offset),
(filename, 0))
class RPCGetCalltipTests(GenericRPCTests):
METHOD = "rpc_get_calltip"
@unittest.skipIf(sys.version_info >= (3, 0),
"Bug in Jedi 0.9.0")
def test_should_get_calltip(self):
source, offset = source_and_offset(
"import threading\nthreading.Thread(_|_")
filename = self.project_file("test.py", source)
calltip = self.backend.rpc_get_calltip(filename,
source,
offset)
expected = self.THREAD_CALLTIP
self.assertEqual(calltip, expected)
@unittest.skipIf(sys.version_info >= (3, 0),
"Bug in Jedi 0.9.0")
def test_should_get_calltip_even_after_parens(self):
source, offset = source_and_offset(
"import threading\nthreading.Thread(foo()_|_")
filename = self.project_file("test.py", source)
actual = self.backend.rpc_get_calltip(filename,
source,
offset)
self.assertEqual(self.THREAD_CALLTIP, actual)
@unittest.skipIf(sys.version_info >= (3, 0),
"Bug in Jedi 0.9.0")
def test_should_get_calltip_at_closing_paren(self):
source, offset = source_and_offset(
"import threading\nthreading.Thread(_|_)")
filename = self.project_file("test.py", source)
actual = self.backend.rpc_get_calltip(filename,
source,
offset)
self.assertEqual(self.THREAD_CALLTIP, actual)
def test_should_not_missing_attribute_get_definition(self):
# Bug #627 / jedi#573
source, offset = source_and_offset(
"import threading\nthreading.Thread(_|_)")
filename = self.project_file("test.py", source)
self.backend.rpc_get_calltip(filename, source, offset)
def test_should_return_none_for_bad_identifier(self):
source, offset = source_and_offset(
"froblgoo(_|_")
filename = self.project_file("test.py", source)
calltip = self.backend.rpc_get_calltip(filename,
source,
offset)
self.assertIsNone(calltip)
def test_should_remove_self_argument(self):
source, offset = source_and_offset(
"d = dict()\n"
"d.keys(_|_")
filename = self.project_file("test.py", source)
actual = self.backend.rpc_get_calltip(filename,
source,
offset)
self.assertEqual(self.KEYS_CALLTIP, actual)
def test_should_remove_package_prefix(self):
source, offset = source_and_offset(
"import decimal\n"
"d = decimal.Decimal('1.5')\n"
"d.radix(_|_")
filename = self.project_file("test.py", source)
actual = self.backend.rpc_get_calltip(filename,
source,
offset)
self.assertEqual(self.RADIX_CALLTIP, actual)
def test_should_return_none_outside_of_all(self):
filename = self.project_file("test.py", "")
source, offset = source_and_offset("import thr_|_eading\n")
calltip = self.backend.rpc_get_calltip(filename,
source, offset)
self.assertIsNone(calltip)
def test_should_find_calltip_different_package(self):
# See issue #74
self.project_file("project/__init__.py", "")
source1 = ("class Add:\n"
" def add(self, a, b):\n"
" return a + b\n")
self.project_file("project/add.py", source1)
source2, offset = source_and_offset(
"from project.add import Add\n"
"class Calculator:\n"
" def add(self, a, b):\n"
" c = Add()\n"
" c.add(_|_\n")
file2 = self.project_file("project/calculator.py", source2)
actual = self.backend.rpc_get_calltip(file2,
source2,
offset)
self.assertEqual(self.ADD_CALLTIP, actual)
class RPCGetDocstringTests(GenericRPCTests):
METHOD = "rpc_get_docstring"
def check_docstring(self, docstring):
def first_line(s):
return s[:s.index("\n")]
self.assertEqual(first_line(docstring),
self.JSON_LOADS_DOCSTRING)
def test_should_get_docstring(self):
source, offset = source_and_offset(
"import json\njson.loads_|_(")
filename = self.project_file("test.py", source)
docstring = self.backend.rpc_get_docstring(filename,
source,
offset)
self.check_docstring(docstring)
def test_should_return_none_for_bad_identifier(self):
source, offset = source_and_offset(
"froblgoo_|_(\n")
filename = self.project_file("test.py", source)
docstring = self.backend.rpc_get_docstring(filename,
source,
offset)
self.assertIsNone(docstring)
class RPCGetUsagesTests(GenericRPCTests):
METHOD = "rpc_get_usages"
def test_should_return_uses_in_same_file(self):
filename = self.project_file("test.py", "")
source, offset = source_and_offset(
"def foo(x):\n"
" return _|_x + x\n")
usages = self.backend.rpc_get_usages(filename,
source,
offset)
self.assertEqual(usages,
[{'name': 'x',
'offset': 8,
'filename': filename},
{'name': 'x',
'filename': filename,
'offset': 23},
{'name': u'x',
'filename': filename,
'offset': 27}])
def test_should_return_uses_in_other_file(self):
file1 = self.project_file("file1.py", "")
file2 = self.project_file("file2.py", "\n\n\n\n\nx = 5")
source, offset = source_and_offset(
"import file2\n"
"file2._|_x\n")
usages = self.backend.rpc_get_usages(file1,
source,
offset)
self.assertEqual(usages,
[{'name': 'x',
'filename': file1,
'offset': 19},
{'name': 'x',
'filename': file2,
'offset': 5}])
def test_should_not_fail_without_symbol(self):
filename = self.project_file("file.py", "")
usages = self.backend.rpc_get_usages(filename,
"",
0)
self.assertEqual(usages, [])
def source_and_offset(source):
"""Return a source and offset from a source description.
>>> source_and_offset("hello, _|_world")
("hello, world", 7)
>>> source_and_offset("_|_hello, world")
("hello, world", 0)
>>> source_and_offset("hello, world_|_")
("hello, world", 12)
"""
offset = source.index("_|_")
return source[:offset] + source[offset + 3:], offset
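# Editor's note: a small runnable check (not part of the original module)
# confirming the cursor-marker convention used throughout these tests.
if __name__ == '__main__':
    src, offset = source_and_offset("import json\njson.lo_|_")
    assert (src, offset) == ("import json\njson.lo", 19)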
| mit |
patmcb/odoo | addons/payment_sips/controllers/main.py | 153 | 1864 | # -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import werkzeug
from openerp import http
from openerp.http import request
_logger = logging.getLogger(__name__)
class SipsController(http.Controller):
_notify_url = '/payment/sips/ipn/'
_return_url = '/payment/sips/dpn/'
def _get_return_url(self, **post):
""" Extract the return URL from the data coming from sips. """
return_url = post.pop('return_url', '')
if not return_url:
tx_obj = request.registry['payment.transaction']
data = tx_obj._sips_data_to_object(post.get('Data'))
custom = json.loads(data.pop('returnContext', False) or '{}')
return_url = custom.get('return_url', '/')
return return_url
def sips_validate_data(self, **post):
res = False
env = request.env
tx_obj = env['payment.transaction']
acquirer_obj = env['payment.acquirer']
sips = acquirer_obj.search([('provider', '=', 'sips')], limit=1)
security = sips._sips_generate_shasign(post)
if security == post['Seal']:
_logger.debug('Sips: validated data')
res = tx_obj.sudo().form_feedback(post, 'sips')
else:
_logger.warning('Sips: data are corrupted')
return res
@http.route([
'/payment/sips/ipn/'],
type='http', auth='none', methods=['POST'])
def sips_ipn(self, **post):
""" Sips IPN. """
self.sips_validate_data(**post)
return ''
@http.route([
'/payment/sips/dpn'], type='http', auth="none", methods=['POST'])
def sips_dpn(self, **post):
""" Sips DPN """
return_url = self._get_return_url(**post)
self.sips_validate_data(**post)
return werkzeug.utils.redirect(return_url)
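# Editor's note (illustrative, not part of the original module): Sips calls
# back twice after a payment -- a server-to-server POST to _notify_url (IPN),
# whose response body is ignored, and a browser POST to _return_url (DPN),
# which redirects the customer.  Both payloads carry a 'Data' blob plus a
# 'Seal' signature that sips_validate_data() verifies before form_feedback()
# records anything; the payload shape sketched below is hypothetical:
#
#     post = {'Data': '...|returnContext={"return_url": "/shop"}|...',
#             'Seal': '<shasign computed over Data and the merchant secret>'}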
| agpl-3.0 |
MrSurly/micropython-esp32 | tests/float/string_format_modulo.py | 24 | 1353 | print("%s" % 1.0)
print("%r" % 1.0)
print("%d" % 1.0)
print("%i" % 1.0)
print("%u" % 1.0)
# these 3 have different behaviour in Python 3.x versions
# uPy raises a TypeError, following Python 3.5 (earlier versions don't)
#print("%x" % 18.0)
#print("%o" % 18.0)
#print("%X" % 18.0)
print("%e" % 1.23456)
print("%E" % 1.23456)
print("%f" % 1.23456)
print("%F" % 1.23456)
print("%g" % 1.23456)
print("%G" % 1.23456)
print("%06e" % float("inf"))
print("%06e" % float("-inf"))
print("%06e" % float("nan"))
print("%02.3d" % 123) # prec > width
print("%+f %+f" % (1.23, -1.23)) # float sign
print("% f % f" % (1.23, -1.23)) # float space sign
print("%0f" % -1.23) # negative number with 0 padding
# numbers with large negative exponents
print('%f' % 1e-10)
print('%f' % 1e-20)
print('%f' % 1e-50)
print('%f' % 1e-100)
print('%f' % 1e-300)
# large decimal precision should be truncated and not overflow buffer
# the output depends on the FP calculation so only first 2 digits are printed
# (the 'g' with small e are printed using 'f' style, so need to be checked)
print(('%.40f' % 1e-300)[:2])
print(('%.40g' % 1e-1)[:2])
print(('%.40g' % 1e-2)[:2])
print(('%.40g' % 1e-3)[:2])
print(('%.40g' % 1e-4)[:2])
print("%.0g" % 1) # 0 precision 'g'
print('%.1e' % 9.99) # round up with positive exponent
print('%.1e' % 0.999) # round up with negative exponent
| mit |
jadonk/debexpo | debexpo/lib/utils.py | 2 | 3365 | # -*- coding: utf-8 -*-
#
# utils.py — Debexpo utility functions
#
# This file is part of debexpo - https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 Jonny Lamb <jonny@debian.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Holds misc utility functions.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb'
__license__ = 'MIT'
import logging
import hashlib
import os
from pylons import config
from debexpo.lib import gnupg
log = logging.getLogger(__name__)
def parse_section(section):
"""
Works out the component and section from the "Section" field.
Sections like `python` or `libdevel` are in main.
Sections with a prefix, separated with a forward-slash also show the component.
It returns a list of strings in the form [component, section].
For example, `non-free/python` has component `non-free` and section `python`.
``section``
Section name to parse.
"""
if '/' in section:
return section.split('/')
else:
return ['main', section]
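# Editor's note: runnable sanity checks (not part of the original module)
# mirroring the docstring example above.
if __name__ == '__main__':
    assert parse_section('python') == ['main', 'python']
    assert parse_section('non-free/python') == ['non-free', 'python']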
def get_package_dir(source):
"""
Returns the directory name under which the package named by the first
argument should be installed.
``source``
Source package name to use to work out directory name.
"""
if source.startswith('lib'):
return os.path.join(source[:4], source)
else:
return os.path.join(source[0], source)
def md5sum(filename):
"""
Returns the md5sum of a file specified.
``filename``
File name of the file to md5sum.
"""
    try:
        f = open(filename, 'rb')
    except IOError:
        raise AttributeError('Failed to open file %s.' % filename)
    digest = hashlib.md5()
    while True:
        chunk = f.read(10240)
        if not chunk:
            break
        digest.update(chunk)
    f.close()
    return digest.hexdigest()
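# Editor's note: an illustrative, runnable sketch (not part of the original
# module).  Reading in 10240-byte chunks keeps memory use flat no matter
# how large the file is.
if __name__ == '__main__':
    import tempfile
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.write(b'hello')
    tmp.close()
    assert md5sum(tmp.name) == '5d41402abc4b2a76b9719d911017c592'
    os.remove(tmp.name)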
def random_hash():
s = os.urandom(20)
return hash_it(s)
def hash_it(s):
if type(s) == unicode:
s = s.encode('utf-8')
return hashlib.md5(s).hexdigest()
def get_gnupg():
"""
Returns an instantiated GnuPG object using debexpo's options for the ``gpg`` binary's and keyring's paths.
"""
return gnupg.GnuPG(config['debexpo.gpg_path'],
config['debexpo.gpg_keyring'])
| mit |
4shadoww/usploit | core/lib/future/types/__init__.py | 70 | 6842 | """
This module contains backports of the data types that were significantly
in the transition from Python 2 to Python 3.
- an implementation of Python 3's bytes object (pure Python subclass of
Python 2's builtin 8-bit str type)
- an implementation of Python 3's str object (pure Python subclass of
Python 2's builtin unicode type)
- a backport of the range iterator from Py3 with slicing support
It is used as follows::
from __future__ import division, absolute_import, print_function
from builtins import bytes, dict, int, range, str
to bring in the new semantics for these functions from Python 3. And
then, for example::
b = bytes(b'ABCD')
assert list(b) == [65, 66, 67, 68]
assert repr(b) == "b'ABCD'"
assert [65, 66] in b
# These raise TypeErrors:
# b + u'EFGH'
# b.split(u'B')
# bytes(b',').join([u'Fred', u'Bill'])
s = str(u'ABCD')
# These raise TypeErrors:
# s.join([b'Fred', b'Bill'])
# s.startswith(b'A')
# b'B' in s
# s.find(b'A')
# s.replace(u'A', b'a')
# This raises an AttributeError:
# s.decode('utf-8')
assert repr(s) == 'ABCD' # consistent repr with Py3 (no u prefix)
for i in range(10**11)[:10]:
pass
and::
class VerboseList(list):
def append(self, item):
print('Adding an item')
super().append(item) # new simpler super() function
For more information:
---------------------
- future.types.newbytes
- future.types.newdict
- future.types.newint
- future.types.newobject
- future.types.newrange
- future.types.newstr
Notes
=====
range()
-------
``range`` is a custom class that backports the slicing behaviour from
Python 3 (based on the ``xrange`` module by Dan Crosta). See the
``newrange`` module docstring for more details.
super()
-------
``super()`` is based on Ryan Kelly's ``magicsuper`` module. See the
``newsuper`` module docstring for more details.
round()
-------
Python 3 modifies the behaviour of ``round()`` to use "Banker's Rounding".
See http://stackoverflow.com/a/10825998. See the ``newround`` module
docstring for more details.
"""
from __future__ import absolute_import, division, print_function
import functools
from numbers import Integral
from future import utils
# Some utility functions to enforce strict type-separation of unicode str and
# bytes:
def disallow_types(argnums, disallowed_types):
"""
A decorator that raises a TypeError if any of the given numbered
arguments is of the corresponding given type (e.g. bytes or unicode
string).
For example:
@disallow_types([0, 1], [unicode, bytes])
def f(a, b):
pass
raises a TypeError when f is called if a unicode object is passed as
`a` or a bytes object is passed as `b`.
This also skips over keyword arguments, so
@disallow_types([0, 1], [unicode, bytes])
def g(a, b=None):
pass
doesn't raise an exception if g is called with only one argument a,
e.g.:
g(b'Byte string')
Example use:
>>> class newbytes(object):
... @disallow_types([1], [unicode])
... def __add__(self, other):
... pass
>>> newbytes('1234') + u'1234' #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: can't concat 'bytes' to (unicode) str
"""
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
# These imports are just for this decorator, and are defined here
# to prevent circular imports:
from .newbytes import newbytes
from .newint import newint
from .newstr import newstr
errmsg = "argument can't be {0}"
for (argnum, mytype) in zip(argnums, disallowed_types):
# Handle the case where the type is passed as a string like 'newbytes'.
if isinstance(mytype, str) or isinstance(mytype, bytes):
mytype = locals()[mytype]
# Only restrict kw args only if they are passed:
if len(args) <= argnum:
break
# Here we use type() rather than isinstance() because
# __instancecheck__ is being overridden. E.g.
# isinstance(b'abc', newbytes) is True on Py2.
if type(args[argnum]) == mytype:
raise TypeError(errmsg.format(mytype))
return function(*args, **kwargs)
return wrapper
return decorator
def no(mytype, argnums=(1,)):
"""
A shortcut for the disallow_types decorator that disallows only one type
(in any position in argnums).
Example use:
>>> class newstr(object):
... @no('bytes')
... def __add__(self, other):
... pass
>>> newstr(u'1234') + b'1234' #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: argument can't be bytes
The object can also be passed directly, but passing the string helps
to prevent circular import problems.
"""
if isinstance(argnums, Integral):
argnums = (argnums,)
disallowed_types = [mytype] * len(argnums)
return disallow_types(argnums, disallowed_types)
def issubset(list1, list2):
"""
Examples:
>>> issubset([], [65, 66, 67])
True
>>> issubset([65], [65, 66, 67])
True
>>> issubset([65, 66], [65, 66, 67])
True
>>> issubset([65, 67], [65, 66, 67])
False
"""
n = len(list1)
for startpos in range(len(list2) - n + 1):
if list2[startpos:startpos+n] == list1:
return True
return False
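# Editor's note: a minimal runnable sketch (not part of the original module)
# of the ``no`` shortcut guarding an operator against a disallowed type.
if __name__ == '__main__':
    class Demo(object):
        @no(bytes)
        def __add__(self, other):
            return 'ok'
    assert Demo() + 1 == 'ok'
    try:
        Demo() + b'raw'
    except TypeError:
        pass  # bytes in argument position 1 is rejected, as intended
    else:
        raise AssertionError('expected TypeError')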
if utils.PY3:
import builtins
bytes = builtins.bytes
dict = builtins.dict
int = builtins.int
list = builtins.list
object = builtins.object
range = builtins.range
str = builtins.str
# The identity mapping
newtypes = {bytes: bytes,
dict: dict,
int: int,
list: list,
object: object,
range: range,
str: str}
__all__ = ['newtypes']
else:
from .newbytes import newbytes
from .newdict import newdict
from .newint import newint
from .newlist import newlist
from .newrange import newrange
from .newobject import newobject
from .newstr import newstr
newtypes = {bytes: newbytes,
dict: newdict,
int: newint,
long: newint,
list: newlist,
object: newobject,
range: newrange,
str: newbytes,
unicode: newstr}
__all__ = ['newbytes', 'newdict', 'newint', 'newlist', 'newrange', 'newstr', 'newtypes']
| mit |
robin-lai/scikit-learn | sklearn/neighbors/unsupervised.py | 117 | 4755 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
The metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
davidzchen/tensorflow | tensorflow/python/keras/metrics.py | 3 | 123088 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
# pylint: disable=g-classes-have-attributes
"""Built-in metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import types
import numpy as np
import six
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.saving.saved_model import metric_serialization
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_variable
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.metrics.Metric')
@six.add_metaclass(abc.ABCMeta)
class Metric(base_layer.Layer):
"""Encapsulates metric logic and state.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: Additional layer keyword arguments.
Standalone usage:
```python
m = SomeMetric(...)
for input in ...:
m.update_state(input)
print('Final result: ', m.result().numpy())
```
Usage with `compile()` API:
```python
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.CategoricalAccuracy()])
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
model.fit(dataset, epochs=10)
```
To be implemented by subclasses:
* `__init__()`: All state variables should be created in this method by
calling `self.add_weight()` like: `self.var = self.add_weight(...)`
* `update_state()`: Has all updates to the state variables like:
self.var.assign_add(...).
* `result()`: Computes and returns a value for the metric
from the state variables.
Example subclass implementation:
```python
class BinaryTruePositives(tf.keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
```
"""
def __init__(self, name=None, dtype=None, **kwargs):
super(Metric, self).__init__(name=name, dtype=dtype, **kwargs)
self.stateful = True # All metric layers are stateful.
self.built = True
if not base_layer_utils.v2_dtype_behavior_enabled():
# We only do this when the V2 behavior is not enabled, as when it is
# enabled, the dtype already defaults to floatx.
self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name
def __new__(cls, *args, **kwargs):
obj = super(Metric, cls).__new__(cls)
# If `update_state` is not in eager/tf.function and it is not from a
# built-in metric, wrap it in `tf.function`. This is so that users writing
# custom metrics in v1 need not worry about control dependencies and
# return ops.
if (base_layer_utils.is_in_eager_or_tf_function() or
is_built_in(cls)):
obj_update_state = obj.update_state
def update_state_fn(*args, **kwargs):
control_status = ag_ctx.control_status_ctx()
ag_update_state = autograph.tf_convert(obj_update_state, control_status)
return ag_update_state(*args, **kwargs)
else:
if isinstance(obj.update_state, def_function.Function):
update_state_fn = obj.update_state
else:
update_state_fn = def_function.function(obj.update_state)
obj.update_state = types.MethodType(
metrics_utils.update_state_wrapper(update_state_fn), obj)
obj_result = obj.result
def result_fn(*args, **kwargs):
control_status = ag_ctx.control_status_ctx()
ag_result = autograph.tf_convert(obj_result, control_status)
return ag_result(*args, **kwargs)
obj.result = types.MethodType(metrics_utils.result_wrapper(result_fn), obj)
return obj
def __call__(self, *args, **kwargs):
"""Accumulates statistics and then computes metric result value.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric,
passed on to `update_state()`.
Returns:
The metric value tensor.
"""
def replica_local_fn(*args, **kwargs):
"""Updates the state of the metric in a replica-local context."""
if any(
isinstance(arg, keras_tensor.KerasTensor)
for arg in nest.flatten((args, kwargs))):
update_op = None
else:
update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable
update_ops = []
if update_op is not None:
update_ops.append(update_op)
with ops.control_dependencies(update_ops):
result_t = self.result() # pylint: disable=not-callable
# We are adding the metric object as metadata on the result tensor.
# This is required when we want to use a metric with `add_metric` API on
# a Model/Layer in graph mode. This metric instance will later be used
# to reset variable state after each epoch of training.
# Example:
# model = Model()
# mean = Mean()
# model.add_metric(mean(values), name='mean')
result_t._metric_obj = self # pylint: disable=protected-access
return result_t
from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top
return distributed_training_utils.call_replica_local_fn(
replica_local_fn, *args, **kwargs)
@property
def dtype(self):
return self._dtype
def get_config(self):
"""Returns the serializable config of the metric."""
return {'name': self.name, 'dtype': self.dtype}
def reset_states(self):
"""Resets all of the metric state variables.
This function is called between epochs/steps,
when a metric is evaluated during training.
"""
K.batch_set_value([(v, 0) for v in self.variables])
@abc.abstractmethod
def update_state(self, *args, **kwargs):
"""Accumulates statistics for the metric.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be executed.
As a result, code should generally work the same way with graph or
eager execution.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def result(self):
"""Computes and returns the metric value tensor.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
"""
raise NotImplementedError('Must be implemented in subclasses.')
### For use by subclasses ###
@doc_controls.for_subclass_implementers
def add_weight(self,
name,
shape=(),
aggregation=tf_variables.VariableAggregation.SUM,
synchronization=tf_variables.VariableSynchronization.ON_READ,
initializer=None,
dtype=None):
"""Adds state variable. Only for use by subclasses."""
from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top
if distribute_ctx.has_strategy():
strategy = distribute_ctx.get_strategy()
else:
strategy = None
# TODO(b/120571621): Make `ON_READ` work with Keras metrics on TPU.
if distributed_training_utils.is_tpu_strategy(strategy):
synchronization = tf_variables.VariableSynchronization.ON_WRITE
with ops.init_scope():
return super(Metric, self).add_weight(
name=name,
shape=shape,
dtype=self._dtype if dtype is None else dtype,
trainable=False,
initializer=initializer,
collections=[],
synchronization=synchronization,
aggregation=aggregation)
### End: For use by subclasses ###
@property
def _trackable_saved_model_saver(self):
return metric_serialization.MetricSavedModelSaver(self)
class Reduce(Metric):
"""Encapsulates metrics that perform a reduce operation on the values.
Args:
reduction: a `tf.keras.metrics.Reduction` enum value.
name: string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
def __init__(self, reduction, name, dtype=None):
super(Reduce, self).__init__(name=name, dtype=dtype)
self.reduction = reduction
self.total = self.add_weight(
'total', initializer=init_ops.zeros_initializer)
if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
metrics_utils.Reduction.WEIGHTED_MEAN]:
self.count = self.add_weight(
'count', initializer=init_ops.zeros_initializer)
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the metric.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
[values], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[values], sample_weight)
values = math_ops.cast(values, self._dtype)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
if self.reduction == metrics_utils.Reduction.SUM:
values = math_ops.reduce_sum(
values, axis=list(range(weight_ndim, ndim)))
else:
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
values = math_ops.multiply(values, sample_weight)
value_sum = math_ops.reduce_sum(values)
with ops.control_dependencies([value_sum]):
update_total_op = self.total.assign_add(value_sum)
# Exit early if the reduction doesn't have a denominator.
if self.reduction == metrics_utils.Reduction.SUM:
return update_total_op
# Update `count` for reductions that require a denominator.
if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
num_values = math_ops.cast(array_ops.size(values), self._dtype)
elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
if sample_weight is None:
num_values = math_ops.cast(array_ops.size(values), self._dtype)
else:
num_values = math_ops.reduce_sum(sample_weight)
else:
raise NotImplementedError(
'reduction [%s] not implemented' % self.reduction)
with ops.control_dependencies([update_total_op]):
return self.count.assign_add(num_values)
def result(self):
if self.reduction == metrics_utils.Reduction.SUM:
return array_ops.identity(self.total)
elif self.reduction in [
metrics_utils.Reduction.WEIGHTED_MEAN,
metrics_utils.Reduction.SUM_OVER_BATCH_SIZE
]:
return math_ops.div_no_nan(self.total, self.count)
else:
raise NotImplementedError(
'reduction [%s] not implemented' % self.reduction)
@keras_export('keras.metrics.Sum')
class Sum(Reduce):
"""Computes the (weighted) sum of the given values.
For example, if values is [1, 3, 5, 7] then the sum is 16.
If the weights were specified as [1, 1, 0, 0] then the sum would be 4.
This metric creates one variable, `total`, that is used to compute the sum of
`values`. This is ultimately returned as `sum`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Sum()
>>> m.update_state([1, 3, 5, 7])
>>> m.result().numpy()
16.0
Usage with `compile()` API:
```python
model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
model.compile(optimizer='sgd', loss='mse')
```
"""
def __init__(self, name='sum', dtype=None):
super(Sum, self).__init__(reduction=metrics_utils.Reduction.SUM,
name=name, dtype=dtype)
@keras_export('keras.metrics.Mean')
class Mean(Reduce):
"""Computes the (weighted) mean of the given values.
For example, if values is [1, 3, 5, 7] then the mean is 4.
If the weights were specified as [1, 1, 0, 0] then the mean would be 2.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as `mean`
which is an idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Mean()
>>> m.update_state([1, 3, 5, 7])
>>> m.result().numpy()
4.0
>>> m.reset_states()
>>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
2.0
Usage with `compile()` API:
```python
model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
model.compile(optimizer='sgd', loss='mse')
```
"""
def __init__(self, name='mean', dtype=None):
super(Mean, self).__init__(
reduction=metrics_utils.Reduction.WEIGHTED_MEAN, name=name, dtype=dtype)
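# A hedged sketch (editorial; `_demo_reduce_semantics` is hypothetical and
# never called at import): how the two public reductions above differ on
# the values [1, 3, 5, 7] with sample_weight [1, 1, 0, 0].
def _demo_reduce_semantics():
  s = Sum()   # Reduction.SUM: no denominator
  s.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
  # s.result() -> 4.0 (weighted sum 1 + 3)
  m = Mean()  # Reduction.WEIGHTED_MEAN: divides by the weight sum
  m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
  # m.result() -> 2.0 (total 4.0 / weight sum 2.0)
  return s.result(), m.result()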
@keras_export('keras.metrics.MeanRelativeError')
class MeanRelativeError(Mean):
"""Computes the mean relative error by normalizing with the given values.
This metric creates two local variables, `total` and `count` that are used to
compute the mean relative error. This is weighted by `sample_weight`, and
it is ultimately returned as `mean_relative_error`:
an idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
normalizer: The normalizer values with same shape as predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
>>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8])
>>> # metric = mean(|y_pred - y_true| / normalizer)
>>> # = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
>>> # = 5/4 = 1.25
>>> m.result().numpy()
1.25
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
```
"""
def __init__(self, normalizer, name=None, dtype=None):
super(MeanRelativeError, self).__init__(name=name, dtype=dtype)
normalizer = math_ops.cast(normalizer, self._dtype)
self.normalizer = normalizer
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_pred, y_true], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true], sample_weight)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(
y_pred, self.normalizer)
y_pred.shape.assert_is_compatible_with(y_true.shape)
relative_errors = math_ops.div_no_nan(
math_ops.abs(y_true - y_pred), self.normalizer)
return super(MeanRelativeError, self).update_state(
relative_errors, sample_weight=sample_weight)
def get_config(self):
n = self.normalizer
config = {'normalizer': K.eval(n) if is_tensor_or_variable(n) else n}
base_config = super(MeanRelativeError, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MeanMetricWrapper(Mean):
"""Wraps a stateless metric function with the Mean metric.
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
def __init__(self, fn, name=None, dtype=None, **kwargs):
super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
sample_weight: Optional `sample_weight` acts as a
coefficient for the metric. If a scalar is provided, then the metric is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the metric for each sample of the batch is rescaled
by the corresponding element in the `sample_weight` vector. If the shape
of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
to this shape), then each metric element of `y_pred` is scaled by the
corresponding value of `sample_weight`. (Note on `dN-1`: all metric
functions reduce by 1 dimension, usually the last axis (-1)).
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_true, y_pred], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_true, y_pred], sample_weight)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
return super(MeanMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
if type(self) is MeanMetricWrapper: # pylint: disable=unidiomatic-typecheck
# Only include function argument when the object is a MeanMetricWrapper
# and not a subclass.
config['fn'] = self._fn
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if is_tensor_or_variable(v) else v
base_config = super(MeanMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
# Note that while MeanMetricWrapper itself isn't public, objects of this
# class may be created and added to the model by calling model.compile.
fn = config.pop('fn', None)
if cls is MeanMetricWrapper:
return cls(get(fn), **config)
return super(MeanMetricWrapper, cls).from_config(config)
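# A hedged sketch (editorial; `_demo_mean_metric_wrapper` and `abs_err`
# are hypothetical names): wrapping a stateless per-example function
# yields a streaming (weighted) mean of its outputs.
def _demo_mean_metric_wrapper():
  def abs_err(y_true, y_pred):
    return math_ops.abs(y_true - y_pred)
  metric = MeanMetricWrapper(abs_err, name='abs_err')
  metric.update_state([1., 2.], [1.5, 2.5])
  return metric.result()  # ~0.5: mean of |1 - 1.5| and |2 - 2.5|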
@keras_export('keras.metrics.Accuracy')
class Accuracy(MeanMetricWrapper):
"""Calculates how often predictions equal labels.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Accuracy()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
>>> m.result().numpy()
0.75
>>> m.reset_states()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
... sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Accuracy()])
```
"""
def __init__(self, name='accuracy', dtype=None):
super(Accuracy, self).__init__(accuracy, name, dtype=dtype)
@keras_export('keras.metrics.BinaryAccuracy')
class BinaryAccuracy(MeanMetricWrapper):
"""Calculates how often predictions match binary labels.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `binary accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
Standalone usage:
>>> m = tf.keras.metrics.BinaryAccuracy()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
>>> m.result().numpy()
0.75
>>> m.reset_states()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.BinaryAccuracy()])
```
"""
def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5):
super(BinaryAccuracy, self).__init__(
binary_accuracy, name, dtype=dtype, threshold=threshold)
@keras_export('keras.metrics.CategoricalAccuracy')
class CategoricalAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches one-hot labels.
You can provide logits of classes as `y_pred`, since argmax of
logits and probabilities are same.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `categorical accuracy`: an idempotent operation that
simply divides `total` by `count`.
`y_pred` and `y_true` should be passed in as vectors of probabilities, rather
than as labels. If necessary, use `tf.one_hot` to expand `y_true` as a vector.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.CategoricalAccuracy()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalAccuracy()])
```
"""
def __init__(self, name='categorical_accuracy', dtype=None):
super(CategoricalAccuracy, self).__init__(
categorical_accuracy, name, dtype=dtype)
@keras_export('keras.metrics.SparseCategoricalAccuracy')
class SparseCategoricalAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches integer labels.
```python
acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1))
```
You can provide logits of classes as `y_pred`, since argmax of
logits and probabilities are same.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `sparse categorical accuracy`: an idempotent operation
that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SparseCategoricalAccuracy()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
```
"""
def __init__(self, name='sparse_categorical_accuracy', dtype=None):
super(SparseCategoricalAccuracy, self).__init__(
sparse_categorical_accuracy, name, dtype=dtype)
@keras_export('keras.metrics.TopKCategoricalAccuracy')
class TopKCategoricalAccuracy(MeanMetricWrapper):
"""Computes how often targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None):
super(TopKCategoricalAccuracy, self).__init__(
top_k_categorical_accuracy, name, dtype=dtype, k=k)
@keras_export('keras.metrics.SparseTopKCategoricalAccuracy')
class SparseTopKCategoricalAccuracy(MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None):
super(SparseTopKCategoricalAccuracy, self).__init__(
sparse_top_k_categorical_accuracy, name, dtype=dtype, k=k)
class _ConfusionMatrixConditionCount(Metric):
"""Calculates the number of the given confusion matrix condition.
Args:
confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions.
thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple
of float threshold values in [0, 1]. A threshold is compared with
prediction values to determine the truth value of predictions (i.e., above
the threshold is `true`, below is `false`). One metric value is generated
for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
def __init__(self,
confusion_matrix_cond,
thresholds=None,
name=None,
dtype=None):
super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)
self._confusion_matrix_cond = confusion_matrix_cond
self.init_thresholds = thresholds
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=0.5)
self.accumulator = self.add_weight(
'accumulator',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the metric statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{self._confusion_matrix_cond: self.accumulator},
y_true,
y_pred,
thresholds=self.thresholds,
sample_weight=sample_weight)
def result(self):
if len(self.thresholds) == 1:
result = self.accumulator[0]
else:
result = self.accumulator
return ops.convert_to_tensor_v2_with_dispatch(result)
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {'thresholds': self.init_thresholds}
base_config = super(_ConfusionMatrixConditionCount, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
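# A hedged sketch (editorial; `_demo_threshold_vector` is hypothetical):
# with a list of thresholds the accumulator holds one count per threshold,
# so result() is a vector rather than a scalar.
def _demo_threshold_vector():
  m = _ConfusionMatrixConditionCount(
      metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
      thresholds=[0.25, 0.5, 0.75])
  m.update_state([0, 0, 1, 1], [0.3, 0.8, 0.4, 0.9])
  # Roughly [2., 1., 1.]: the 0.3 and 0.8 predictions on negative labels
  # exceed successively fewer of the three thresholds.
  return m.result()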
@keras_export('keras.metrics.FalsePositives')
class FalsePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of false positives.
If `sample_weight` is given, calculates the sum of the weights of
false positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.FalsePositives()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.FalsePositives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super(FalsePositives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.FalseNegatives')
class FalseNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of false negatives.
If `sample_weight` is given, calculates the sum of the weights of
false negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.FalseNegatives()
>>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.FalseNegatives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super(FalseNegatives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.TrueNegatives')
class TrueNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of true negatives.
If `sample_weight` is given, calculates the sum of the weights of
true negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.TrueNegatives()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.TrueNegatives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super(TrueNegatives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.TruePositives')
class TruePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of true positives.
If `sample_weight` is given, calculates the sum of the weights of
true positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.TruePositives()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result().numpy()
2.0
>>> m.reset_states()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.TruePositives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super(TruePositives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.Precision')
class Precision(Metric):
"""Computes the precision of the predictions with respect to the labels.
The metric creates two local variables, `true_positives` and `false_positives`
that are used to compute the precision. This value is ultimately returned as
`precision`, an idempotent operation that simply divides `true_positives`
by the sum of `true_positives` and `false_positives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, we'll calculate precision as how often on average a class
among the top-k classes with the highest predicted values of a batch entry is
correct and can be found in the label for that entry.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold and/or in the
top-k highest predictions, and computing the fraction of them for which
`class_id` is indeed a correct label.
Args:
thresholds: (Optional) A float value or a python list/tuple of float
threshold values in [0, 1]. A threshold is compared with prediction
values to determine the truth value of predictions (i.e., above the
threshold is `true`, below is `false`). One metric value is generated
for each threshold value. If neither thresholds nor top_k are set, the
default is to calculate precision with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating precision.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Precision()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result().numpy()
0.6666667
>>> m.reset_states()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
>>> # With top_k=2, it will calculate precision over y_true[:2] and y_pred[:2]
>>> m = tf.keras.metrics.Precision(top_k=2)
>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result().numpy()
0.0
>>> # With top_k=4, it will calculate precision over y_true[:4] and y_pred[:4]
>>> m = tf.keras.metrics.Precision(top_k=4)
>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Precision()])
```
"""
def __init__(self,
thresholds=None,
top_k=None,
class_id=None,
name=None,
dtype=None):
super(Precision, self).__init__(name=name, dtype=dtype)
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold)
self.true_positives = self.add_weight(
'true_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false positive statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives
},
y_true,
y_pred,
thresholds=self.thresholds,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight)
def result(self):
result = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_positives)
return result[0] if len(self.thresholds) == 1 else result
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {
'thresholds': self.init_thresholds,
'top_k': self.top_k,
'class_id': self.class_id
}
base_config = super(Precision, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
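# A hedged sketch (editorial; `_demo_precision_class_id` is hypothetical):
# with `class_id=1` only the second column of the one-hot labels and
# predictions is scored, as described in the class docstring above.
def _demo_precision_class_id():
  m = Precision(class_id=1)
  m.update_state([[0, 1, 0], [0, 0, 1]], [[0.1, 0.9, 0.0], [0.2, 0.8, 0.0]])
  return m.result()  # roughly 0.5: one true positive, one false positive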
@keras_export('keras.metrics.Recall')
class Recall(Metric):
"""Computes the recall of the predictions with respect to the labels.
This metric creates two local variables, `true_positives` and
`false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, recall will be computed as how often on average a class
among the labels of a batch entry is in the top-k predictions.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing the
fraction of them for which `class_id` is above the threshold and/or in the
top-k predictions.
Args:
thresholds: (Optional) A float value or a python list/tuple of float
threshold values in [0, 1]. A threshold is compared with prediction
values to determine the truth value of predictions (i.e., above the
threshold is `true`, below is `false`). One metric value is generated
for each threshold value. If neither thresholds nor top_k are set, the
default is to calculate recall with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating recall.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Recall()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result().numpy()
0.6666667
>>> m.reset_states()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Recall()])
```
"""
def __init__(self,
thresholds=None,
top_k=None,
class_id=None,
name=None,
dtype=None):
super(Recall, self).__init__(name=name, dtype=dtype)
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold)
self.true_positives = self.add_weight(
'true_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false negative statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives
},
y_true,
y_pred,
thresholds=self.thresholds,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight)
def result(self):
result = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_negatives)
return result[0] if len(self.thresholds) == 1 else result
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {
'thresholds': self.init_thresholds,
'top_k': self.top_k,
'class_id': self.class_id
}
base_config = super(Recall, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
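# Editorial note: both Precision.result and Recall.result use div_no_nan,
# so a stream with an all-zero denominator yields 0.0 rather than NaN.
# A hedged check (`_demo_recall_no_positives` is hypothetical):
def _demo_recall_no_positives():
  m = Recall()
  m.update_state([0, 0], [0.1, 0.2])  # no positive labels at all
  return m.result()  # 0.0, because div_no_nan(0, 0) == 0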
@six.add_metaclass(abc.ABCMeta)
class SensitivitySpecificityBase(Metric):
"""Abstract base class for computing sensitivity and specificity.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
"""
def __init__(self, value, num_thresholds=200, name=None, dtype=None):
super(SensitivitySpecificityBase, self).__init__(name=name, dtype=dtype)
if num_thresholds <= 0:
raise ValueError('`num_thresholds` must be > 0.')
self.value = value
self.true_positives = self.add_weight(
'true_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.true_negatives = self.add_weight(
'true_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
# Compute `num_thresholds` thresholds in [0, 1]
if num_thresholds == 1:
self.thresholds = [0.5]
else:
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
self.thresholds = [0.0] + thresholds + [1.0]
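# Editorial worked example of the grid above: the two endpoints plus
# evenly spaced interior points, e.g. num_thresholds=5 gives
# [0.0, 0.25, 0.5, 0.75, 1.0], and num_thresholds=1 degenerates to [0.5].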
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
},
y_true,
y_pred,
thresholds=self.thresholds,
sample_weight=sample_weight)
def reset_states(self):
num_thresholds = len(self.thresholds)
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def _find_max_under_constraint(self, constrained, dependent, predicate):
"""Returns the maximum of dependent_statistic that satisfies the constraint.
Args:
constrained: Over these values the constraint
is specified. A rank-1 tensor.
dependent: From these values the maximum that satiesfies the
constraint is selected. Values in this tensor and in
`constrained` are linked by having the same threshold at each
position, hence this tensor must have the same shape.
predicate: A binary boolean functor to be applied to arguments
`constrained` and `self.value`, e.g. `tf.greater`.
Returns maximal dependent value, if no value satiesfies the constraint 0.0.
"""
feasible = array_ops.where(predicate(constrained, self.value))
feasible_exists = math_ops.greater(array_ops.size(feasible), 0)
def get_max():
return math_ops.reduce_max(array_ops.gather(dependent, feasible))
return control_flow_ops.cond(feasible_exists, get_max, lambda: 0.0)
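# Editorial worked example of `_find_max_under_constraint`: with
# constrained = [0.9, 0.7, 0.4], dependent = [0.2, 0.5, 0.8],
# self.value = 0.5 and predicate = math_ops.greater_equal, positions 0
# and 1 are feasible, so the result is max(0.2, 0.5) == 0.5; if no
# position passed the predicate, the cond() above would return 0.0.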
@keras_export('keras.metrics.SensitivityAtSpecificity')
class SensitivityAtSpecificity(SensitivitySpecificityBase):
"""Computes best sensitivity where specificity is >= specified value.
the sensitivity at a given specificity.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such (tp / (tp + fn)).
`Specificity` measures the proportion of actual negatives that are correctly
identified as such (tn / (tn + fp)).
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
sensitivity at the given specificity. The threshold for the given specificity
value is computed and used to evaluate the corresponding sensitivity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
Args:
specificity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given specificity.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SensitivityAtSpecificity(0.5)
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
... sample_weight=[1, 1, 2, 2, 1])
>>> m.result().numpy()
0.333333
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SensitivityAtSpecificity(specificity=0.5)])
```
"""
def __init__(self, specificity, num_thresholds=200, name=None, dtype=None):
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
self.specificity = specificity
self.num_thresholds = num_thresholds
super(SensitivityAtSpecificity, self).__init__(
specificity, num_thresholds=num_thresholds, name=name, dtype=dtype)
def result(self):
specificities = math_ops.div_no_nan(
self.true_negatives, self.true_negatives + self.false_positives)
sensitivities = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
return self._find_max_under_constraint(
specificities, sensitivities, math_ops.greater_equal)
def get_config(self):
config = {
'num_thresholds': self.num_thresholds,
'specificity': self.specificity
}
base_config = super(SensitivityAtSpecificity, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.SpecificityAtSensitivity')
class SpecificityAtSensitivity(SensitivitySpecificityBase):
"""Computes best specificity where sensitivity is >= specified value.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such (tp / (tp + fn)).
`Specificity` measures the proportion of actual negatives that are correctly
identified as such (tn / (tn + fp)).
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
specificity at the given sensitivity. The threshold for the given sensitivity
value is computed and used to evaluate the corresponding specificity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
Args:
sensitivity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given sensitivity.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SpecificityAtSensitivity(0.5)
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
>>> m.result().numpy()
0.66666667
>>> m.reset_states()
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
... sample_weight=[1, 1, 2, 2, 2])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SpecificityAtSensitivity(sensitivity=0.5)])
```
"""
def __init__(self, sensitivity, num_thresholds=200, name=None, dtype=None):
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
self.sensitivity = sensitivity
self.num_thresholds = num_thresholds
super(SpecificityAtSensitivity, self).__init__(
sensitivity, num_thresholds=num_thresholds, name=name, dtype=dtype)
def result(self):
sensitivities = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
specificities = math_ops.div_no_nan(
self.true_negatives, self.true_negatives + self.false_positives)
return self._find_max_under_constraint(
sensitivities, specificities, math_ops.greater_equal)
def get_config(self):
config = {
'num_thresholds': self.num_thresholds,
'sensitivity': self.sensitivity
}
base_config = super(SpecificityAtSensitivity, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.PrecisionAtRecall')
class PrecisionAtRecall(SensitivitySpecificityBase):
"""Computes best precision where recall is >= specified value.
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
precision at the given recall. The threshold for the given recall
value is computed and used to evaluate the corresponding precision.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
recall: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given recall.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.PrecisionAtRecall(0.5)
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
... sample_weight=[2, 2, 2, 1, 1])
>>> m.result().numpy()
0.33333333
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)])
```
"""
def __init__(self, recall, num_thresholds=200, name=None, dtype=None):
if recall < 0 or recall > 1:
raise ValueError('`recall` must be in the range [0, 1].')
self.recall = recall
self.num_thresholds = num_thresholds
super(PrecisionAtRecall, self).__init__(
value=recall,
num_thresholds=num_thresholds,
name=name,
dtype=dtype)
def result(self):
recalls = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
precisions = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_positives)
return self._find_max_under_constraint(
recalls, precisions, math_ops.greater_equal)
def get_config(self):
config = {'num_thresholds': self.num_thresholds, 'recall': self.recall}
base_config = super(PrecisionAtRecall, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.RecallAtPrecision')
class RecallAtPrecision(SensitivitySpecificityBase):
"""Computes best recall where precision is >= specified value.
For a given score-label distribution, the required precision might not
be achievable; in that case, 0.0 is returned as the recall.
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
recall at the given precision. The threshold for the given precision
value is computed and used to evaluate the corresponding recall.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
precision: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given precision.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.RecallAtPrecision(0.8)
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.RecallAtPrecision(precision=0.8)])
```
"""
def __init__(self, precision, num_thresholds=200, name=None, dtype=None):
if precision < 0 or precision > 1:
raise ValueError('`precision` must be in the range [0, 1].')
self.precision = precision
self.num_thresholds = num_thresholds
super(RecallAtPrecision, self).__init__(
value=precision,
num_thresholds=num_thresholds,
name=name,
dtype=dtype)
def result(self):
precisions = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_positives)
recalls = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
return self._find_max_under_constraint(
precisions, recalls, math_ops.greater_equal)
def get_config(self):
config = {'num_thresholds': self.num_thresholds,
'precision': self.precision}
base_config = super(RecallAtPrecision, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.AUC')
class AUC(Metric):
"""Computes the approximate AUC (Area under the curve) via a Riemann sum.
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the AUC.
To discretize the AUC curve, a linearly spaced set of thresholds is used to
compute pairs of recall and precision values. The area under the ROC-curve is
therefore computed using the height of the recall values by the false positive
rate, while the area under the PR-curve is computed using the height of
the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`. The `thresholds` parameter can be
used to manually specify thresholds which split the predictions more evenly.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case. Setting `summation_method`
to 'minoring' or 'majoring' can help quantify the error in the approximation
by providing a lower or upper bound estimate of the AUC.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use when discretizing the ROC curve. Values must be > 1.
curve: (Optional) Specifies the name of the curve to be computed, 'ROC'
[default] or 'PR' for the Precision-Recall-curve.
summation_method: (Optional) Specifies the [Riemann summation method](
https://en.wikipedia.org/wiki/Riemann_sum) used.
'interpolation' (default) applies the mid-point summation scheme for `ROC`.
For PR-AUC, interpolates (true/false) positives but not the ratio that
is precision (see Davis & Goadrich 2006 for details);
'minoring' applies left summation
for increasing intervals and right summation for decreasing intervals;
'majoring' does the opposite.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
thresholds: (Optional) A list of floating point values to use as the
thresholds for discretizing the curve. If set, the `num_thresholds`
parameter is ignored. Values should be in [0, 1]. Endpoint thresholds
equal to {-epsilon, 1+epsilon} for a small positive epsilon value will
be automatically included with these to correctly handle predictions
equal to exactly 0 or 1.
multi_label: boolean indicating whether multilabel data should be
treated as such, wherein AUC is computed separately for each label and
then averaged across labels, or (when False) if the data should be
flattened into a single label before AUC computation. In the latter
case, when multilabel data is passed to AUC, each label-prediction pair
is treated as an individual data point. Should be set to False for
multi-class data.
label_weights: (optional) list, array, or tensor of non-negative weights
used to compute AUCs for multilabel data. When `multi_label` is True,
the weights are applied to the individual label AUCs when they are
averaged to produce the multi-label AUC. When it's False, they are used
to weight the individual label predictions in computing the confusion
matrix on the flattened data. Note that this is unlike class_weights in
that class_weights weights the example depending on the value of its
label, whereas label_weights depends only on the index of that label
before flattening; therefore `label_weights` should not be used for
multi-class data.
Standalone usage:
>>> m = tf.keras.metrics.AUC(num_thresholds=3)
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
>>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
>>> # recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
>>> # auc = ((((1+0.5)/2)*(1-0))+ (((0.5+0)/2)*(0-0))) = 0.75
>>> m.result().numpy()
0.75
>>> m.reset_states()
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.AUC()])
```
"""
def __init__(self,
num_thresholds=200,
curve='ROC',
summation_method='interpolation',
name=None,
dtype=None,
thresholds=None,
multi_label=False,
label_weights=None):
# Validate configurations.
if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
metrics_utils.AUCCurve):
raise ValueError('Invalid curve: "{}". Valid options are: "{}"'.format(
curve, list(metrics_utils.AUCCurve)))
if isinstance(
summation_method,
metrics_utils.AUCSummationMethod) and summation_method not in list(
metrics_utils.AUCSummationMethod):
raise ValueError(
'Invalid summation method: "{}". Valid options are: "{}"'.format(
summation_method, list(metrics_utils.AUCSummationMethod)))
# Update properties.
if thresholds is not None:
# If specified, use the supplied thresholds.
self.num_thresholds = len(thresholds) + 2
thresholds = sorted(thresholds)
else:
if num_thresholds <= 1:
raise ValueError('`num_thresholds` must be > 1.')
# Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
# (0, 1).
self.num_thresholds = num_thresholds
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
# Add an endpoint "threshold" below zero and above one for either
# threshold method to account for floating point imprecisions.
self._thresholds = np.array([0.0 - K.epsilon()] + thresholds +
[1.0 + K.epsilon()])
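# Illustrative note (values assume the default K.epsilon() == 1e-7): with
# num_thresholds=5 the interior thresholds are [0.25, 0.5, 0.75], so
# self._thresholds becomes [-1e-7, 0.25, 0.5, 0.75, 1 + 1e-7].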
if isinstance(curve, metrics_utils.AUCCurve):
self.curve = curve
else:
self.curve = metrics_utils.AUCCurve.from_str(curve)
if isinstance(summation_method, metrics_utils.AUCSummationMethod):
self.summation_method = summation_method
else:
self.summation_method = metrics_utils.AUCSummationMethod.from_str(
summation_method)
super(AUC, self).__init__(name=name, dtype=dtype)
# Handle multilabel arguments.
self.multi_label = multi_label
if label_weights is not None:
label_weights = constant_op.constant(label_weights, dtype=self.dtype)
checks = [
check_ops.assert_non_negative(
label_weights,
message='All values of `label_weights` must be non-negative.')
]
self.label_weights = control_flow_ops.with_dependencies(
checks, label_weights)
else:
self.label_weights = None
self._built = False
if self.multi_label:
self._num_labels = None
else:
self._build(None)
@property
def thresholds(self):
"""The thresholds used for evaluating AUC."""
return list(self._thresholds)
def _build(self, shape):
"""Initialize TP, FP, TN, and FN tensors, given the shape of the data."""
if self.multi_label:
if shape.ndims != 2:
raise ValueError('`y_true` must have rank=2 when `multi_label` is '
'True. Found rank %s.' % shape.ndims)
self._num_labels = shape[1]
variable_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(self.num_thresholds), self._num_labels])
else:
variable_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(self.num_thresholds)])
self._build_input_shape = shape
# Create metric variables
self.true_positives = self.add_weight(
'true_positives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
self.true_negatives = self.add_weight(
'true_negatives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=variable_shape,
initializer=init_ops.zeros_initializer)
if self.multi_label:
with ops.init_scope():
# This should only be necessary for handling v1 behavior. In v2, AUC
# should be initialized outside of any tf.functions, and therefore in
# eager mode.
if not context.executing_eagerly():
K._initialize_variables(K._get_session()) # pylint: disable=protected-access
self._built = True
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
deps = []
if not self._built:
self._build(tensor_shape.TensorShape(y_pred.shape))
if self.multi_label or (self.label_weights is not None):
# y_true should have shape (number of examples, number of labels).
shapes = [
(y_true, ('N', 'L'))
]
if self.multi_label:
# TP, TN, FP, and FN should all have shape
# (number of thresholds, number of labels).
shapes.extend([(self.true_positives, ('T', 'L')),
(self.true_negatives, ('T', 'L')),
(self.false_positives, ('T', 'L')),
(self.false_negatives, ('T', 'L'))])
if self.label_weights is not None:
# label_weights should be of length equal to the number of labels.
shapes.append((self.label_weights, ('L',)))
deps = [
check_ops.assert_shapes(
shapes, message='Number of labels is not consistent.')
]
# Only forward label_weights to update_confusion_matrix_variables when
# multi_label is False. Otherwise the averaging of individual label AUCs is
# handled in AUC.result
label_weights = None if self.multi_label else self.label_weights
with ops.control_dependencies(deps):
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
self.true_positives,
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES:
self.true_negatives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES:
self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
self.false_negatives,
},
y_true,
y_pred,
self._thresholds,
sample_weight=sample_weight,
multi_label=self.multi_label,
label_weights=label_weights)
def interpolate_pr_auc(self):
"""Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
https://www.biostat.wisc.edu/~page/rocpr.pdf
Note here we derive & use a closed formula not present in the paper
as follows:
Precision = TP / (TP + FP) = TP / P
Modeling all of TP (true positive), FP (false positive) and their sum
P = TP + FP (predicted positive) as varying linearly within each interval
[A, B] between successive thresholds, we get
Precision slope = dTP / dP
= (TP_B - TP_A) / (P_B - P_A)
= (TP - TP_A) / (P - P_A)
Precision = (TP_A + slope * (P - P_A)) / P
The area within the interval is (slope / total_pos_weight) times
int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
where dTP == TP_B - TP_A.
Note that when P_A == 0 the above calculation simplifies into
int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
which is really equivalent to imputing constant precision throughout the
first bucket having >0 true positives.
Returns:
pr_auc: an approximation of the area under the P-R curve.
"""
dtp = self.true_positives[:self.num_thresholds -
1] - self.true_positives[1:]
p = self.true_positives + self.false_positives
dp = p[:self.num_thresholds - 1] - p[1:]
prec_slope = math_ops.div_no_nan(
dtp, math_ops.maximum(dp, 0), name='prec_slope')
intercept = self.true_positives[1:] - math_ops.multiply(prec_slope, p[1:])
safe_p_ratio = array_ops.where(
math_ops.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
math_ops.div_no_nan(
p[:self.num_thresholds - 1],
math_ops.maximum(p[1:], 0),
name='recall_relative_ratio'),
array_ops.ones_like(p[1:]))
pr_auc_increment = math_ops.div_no_nan(
prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
math_ops.maximum(self.true_positives[1:] + self.false_negatives[1:], 0),
name='pr_auc_increment')
if self.multi_label:
by_label_auc = math_ops.reduce_sum(
pr_auc_increment, name=self.name + '_by_label', axis=0)
if self.label_weights is None:
# Evenly weighted average of the label AUCs.
return math_ops.reduce_mean(by_label_auc, name=self.name)
else:
# Weighted average of the label AUCs.
return math_ops.div_no_nan(
math_ops.reduce_sum(
math_ops.multiply(by_label_auc, self.label_weights)),
math_ops.reduce_sum(self.label_weights),
name=self.name)
else:
return math_ops.reduce_sum(pr_auc_increment, name='interpolate_pr_auc')
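# Worked numeric check of the increment above (illustrative, made-up counts):
# between two successive thresholds suppose TP falls 4 -> 2 and P = TP + FP
# falls 8 -> 4. Then prec_slope = dTP / dP = 2 / 4 = 0.5, the intercept is
# 2 - 0.5 * 4 = 0, and the increment reduces to
# 0.5 * (2 + 0 * log(8 / 4)) / total_pos = 1 / total_pos.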
def result(self):
if (self.curve == metrics_utils.AUCCurve.PR and
self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION
):
# This use case is different and is handled separately.
return self.interpolate_pr_auc()
# Set `x` and `y` values for the curves based on `curve` config.
recall = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_negatives)
if self.curve == metrics_utils.AUCCurve.ROC:
fp_rate = math_ops.div_no_nan(self.false_positives,
self.false_positives + self.true_negatives)
x = fp_rate
y = recall
else: # curve == 'PR'.
precision = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_positives)
x = recall
y = precision
# Find the rectangle heights based on `summation_method`.
if self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION:
# Note: the case ('PR', 'interpolation') has been handled above.
heights = (y[:self.num_thresholds - 1] + y[1:]) / 2.
elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
heights = math_ops.minimum(y[:self.num_thresholds - 1], y[1:])
else: # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
heights = math_ops.maximum(y[:self.num_thresholds - 1], y[1:])
# Sum up the areas of all the rectangles.
if self.multi_label:
riemann_terms = math_ops.multiply(x[:self.num_thresholds - 1] - x[1:],
heights)
by_label_auc = math_ops.reduce_sum(
riemann_terms, name=self.name + '_by_label', axis=0)
if self.label_weights is None:
# Unweighted average of the label AUCs.
return math_ops.reduce_mean(by_label_auc, name=self.name)
else:
# Weighted average of the label AUCs.
return math_ops.div_no_nan(
math_ops.reduce_sum(
math_ops.multiply(by_label_auc, self.label_weights)),
math_ops.reduce_sum(self.label_weights),
name=self.name)
else:
return math_ops.reduce_sum(
math_ops.multiply(x[:self.num_thresholds - 1] - x[1:], heights),
name=self.name)
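# Illustrative note: for y = [1.0, 0.5, 0.0] across three thresholds,
# 'interpolation' uses heights [(1.0 + 0.5) / 2, (0.5 + 0.0) / 2],
# 'minoring' uses [0.5, 0.0] (a lower bound) and 'majoring' uses [1.0, 0.5]
# (an upper bound); each is multiplied by dx = x[:num_thresholds - 1] - x[1:].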
def reset_states(self):
if self.multi_label:
K.batch_set_value([(v, np.zeros((self.num_thresholds, self._num_labels)))
for v in self.variables])
else:
K.batch_set_value([
(v, np.zeros((self.num_thresholds,))) for v in self.variables
])
def get_config(self):
if is_tensor_or_variable(self.label_weights):
label_weights = K.eval(self.label_weights)
else:
label_weights = self.label_weights
config = {
'num_thresholds': self.num_thresholds,
'curve': self.curve.value,
'summation_method': self.summation_method.value,
# We remove the endpoint thresholds as an inverse of how the thresholds
# were initialized. This ensures that a metric initialized from this
# config has the same thresholds.
'thresholds': self.thresholds[1:-1],
'multi_label': self.multi_label,
'label_weights': label_weights
}
base_config = super(AUC, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.CosineSimilarity')
class CosineSimilarity(MeanMetricWrapper):
"""Computes the cosine similarity between the labels and predictions.
`cosine similarity = (a . b) / ||a|| ||b||`
See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).
This metric keeps the average cosine similarity between `predictions` and
`labels` over a stream of data.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
Standalone usage:
>>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
>>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
>>> # = ((0. + 0.) + (0.5 + 0.5)) / 2
>>> m = tf.keras.metrics.CosineSimilarity(axis=1)
>>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
>>> m.result().numpy()
0.49999997
>>> m.reset_states()
>>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
... sample_weight=[0.3, 0.7])
>>> m.result().numpy()
0.6999999
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
```
"""
def __init__(self, name='cosine_similarity', dtype=None, axis=-1):
super(CosineSimilarity, self).__init__(
cosine_similarity, name, dtype=dtype, axis=axis)
@keras_export('keras.metrics.MeanAbsoluteError')
class MeanAbsoluteError(MeanMetricWrapper):
"""Computes the mean absolute error between the labels and predictions.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanAbsoluteError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanAbsoluteError()])
```
"""
def __init__(self, name='mean_absolute_error', dtype=None):
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(MeanMetricWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanAbsolutePercentageError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
250000000.0
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
500000000.0
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
```
"""
def __init__(self, name='mean_absolute_percentage_error', dtype=None):
super(MeanAbsolutePercentageError, self).__init__(
mean_absolute_percentage_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredError')
class MeanSquaredError(MeanMetricWrapper):
"""Computes the mean squared error between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanSquaredError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanSquaredError()])
```
"""
def __init__(self, name='mean_squared_error', dtype=None):
super(MeanSquaredError, self).__init__(
mean_squared_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(MeanMetricWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.12011322
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.24022643
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
```
"""
def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name, dtype=dtype)
@keras_export('keras.metrics.Hinge')
class Hinge(MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided, we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Hinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.3
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.1
Usage with `compile()` API:
```python
model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()])
```
"""
def __init__(self, name='hinge', dtype=None):
super(Hinge, self).__init__(hinge, name, dtype=dtype)
@keras_export('keras.metrics.SquaredHinge')
class SquaredHinge(MeanMetricWrapper):
"""Computes the squared hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided, we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SquaredHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.86
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.46
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SquaredHinge()])
```
"""
def __init__(self, name='squared_hinge', dtype=None):
super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)
@keras_export('keras.metrics.CategoricalHinge')
class CategoricalHinge(MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.2
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalHinge()])
```
"""
def __init__(self, name='categorical_hinge', dtype=None):
super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)
@keras_export('keras.metrics.RootMeanSquaredError')
class RootMeanSquaredError(Mean):
"""Computes root mean squared error metric between `y_true` and `y_pred`.
Standalone usage:
>>> m = tf.keras.metrics.RootMeanSquaredError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.70710677
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.RootMeanSquaredError()])
```
"""
def __init__(self, name='root_mean_squared_error', dtype=None):
super(RootMeanSquaredError, self).__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates root mean squared error statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
error_sq = math_ops.squared_difference(y_pred, y_true)
return super(RootMeanSquaredError, self).update_state(
error_sq, sample_weight=sample_weight)
def result(self):
return math_ops.sqrt(math_ops.div_no_nan(self.total, self.count))
@keras_export('keras.metrics.LogCoshError')
class LogCoshError(MeanMetricWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true).
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.LogCoshError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.10844523
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.21689045
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.LogCoshError()])
```
"""
def __init__(self, name='logcosh', dtype=None):
super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)
@keras_export('keras.metrics.Poisson')
class Poisson(MeanMetricWrapper):
"""Computes the Poisson metric between `y_true` and `y_pred`.
`metric = y_pred - y_true * log(y_pred)`
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Poisson()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.49999997
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.99999994
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Poisson()])
```
"""
def __init__(self, name='poisson', dtype=None):
super(Poisson, self).__init__(poisson, name, dtype=dtype)
@keras_export('keras.metrics.KLDivergence')
class KLDivergence(MeanMetricWrapper):
"""Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`.
`metric = y_true * log(y_true / y_pred)`
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.KLDivergence()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
0.45814306
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.9162892
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.KLDivergence()])
```
"""
def __init__(self, name='kullback_leibler_divergence', dtype=None):
super(KLDivergence, self).__init__(
kullback_leibler_divergence, name, dtype=dtype)
@keras_export('keras.metrics.MeanIoU')
class MeanIoU(Metric):
"""Computes the mean Intersection-Over-Union metric.
Mean Intersection-Over-Union is a common evaluation metric for semantic image
segmentation, which first computes the IOU for each semantic class and then
computes the average over classes. IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by
`sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives)
>>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
>>> m = tf.keras.metrics.MeanIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result().numpy()
0.33333334
>>> m.reset_states()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> m.result().numpy()
0.23809525
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
```
"""
def __init__(self, num_classes, name=None, dtype=None):
super(MeanIoU, self).__init__(name=name, dtype=dtype)
self.num_classes = num_classes
# Variable to accumulate the predictions in the confusion matrix. Setting
# the type to be `float64` as required by confusion_matrix_ops.
self.total_cm = self.add_weight(
'total_confusion_matrix',
shape=(num_classes, num_classes),
initializer=init_ops.zeros_initializer,
dtype=dtypes.float64)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
# Flatten the input if its rank > 1.
if y_pred.shape.ndims > 1:
y_pred = array_ops.reshape(y_pred, [-1])
if y_true.shape.ndims > 1:
y_true = array_ops.reshape(y_true, [-1])
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
if sample_weight.shape.ndims > 1:
sample_weight = array_ops.reshape(sample_weight, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
y_true,
y_pred,
self.num_classes,
weights=sample_weight,
dtype=dtypes.float64)
return self.total_cm.assign_add(current_cm)
def result(self):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = math_ops.cast(
array_ops.tensor_diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(
math_ops.cast(math_ops.not_equal(denominator, 0), dtype=self._dtype))
iou = math_ops.div_no_nan(true_positives, denominator)
return math_ops.div_no_nan(
math_ops.reduce_sum(iou, name='mean_iou'), num_valid_entries)
def reset_states(self):
K.set_value(self.total_cm, np.zeros((self.num_classes, self.num_classes)))
def get_config(self):
config = {'num_classes': self.num_classes}
base_config = super(MeanIoU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
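# --- Illustrative sketch (not part of the original module) ---
# A pure-NumPy analogue of MeanIoU.result() for a confusion matrix `cm`,
# to make the per-class IoU averaging concrete (assumes `np` is NumPy, as
# imported elsewhere in this file):
def _example_mean_iou(cm):
  """Mean IoU from a [num_classes, num_classes] confusion matrix."""
  cm = np.asarray(cm, dtype=np.float64)
  sum_over_row = cm.sum(axis=0)
  sum_over_col = cm.sum(axis=1)
  true_positives = np.diag(cm)
  denominator = sum_over_row + sum_over_col - true_positives
  valid = denominator > 0  # skip classes absent from both labels and predictions
  if not valid.any():
    return 0.0
  iou = np.zeros_like(denominator)
  iou[valid] = true_positives[valid] / denominator[valid]
  return iou.sum() / valid.sum()
# E.g. _example_mean_iou([[1, 1], [1, 1]]) == (1/3 + 1/3) / 2 ~= 0.3333,
# matching the docstring example above.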
@keras_export('keras.metrics.MeanTensor')
class MeanTensor(Metric):
"""Computes the element-wise (weighted) mean of the given tensors.
`MeanTensor` returns a tensor with the same shape as the input tensors. The
mean value is updated by keeping local variables `total` and `count`. The
`total` tracks the sum of the weighted values, and `count` stores the sum of
the weighted counts.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanTensor()
>>> m.update_state([0, 1, 2, 3])
>>> m.update_state([4, 5, 6, 7])
>>> m.result().numpy()
array([2., 3., 4., 5.], dtype=float32)
>>> m.update_state([12, 10, 8, 6], sample_weight=[0, 0.2, 0.5, 1])
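>>> # total = [4 + 0, 6 + 2, 8 + 4, 10 + 6] = [4, 8, 12, 16]
>>> # count = [2 + 0, 2 + 0.2, 2 + 0.5, 2 + 1] = [2, 2.2, 2.5, 3]
>>> # result = total / count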
>>> m.result().numpy()
array([2. , 3.6363635, 4.8 , 5.3333335], dtype=float32)
"""
def __init__(self, name='mean_tensor', dtype=None):
super(MeanTensor, self).__init__(name=name, dtype=dtype)
self._shape = None
self._total = None
self._count = None
self._built = False
def _build(self, shape):
self._shape = tensor_shape.TensorShape(shape)
self._build_input_shape = self._shape
# Create new state variables
self._total = self.add_weight(
'total', shape=shape, initializer=init_ops.zeros_initializer)
self._count = self.add_weight(
'count', shape=shape, initializer=init_ops.zeros_initializer)
with ops.init_scope():
if not context.executing_eagerly():
K._initialize_variables(K._get_session()) # pylint: disable=protected-access
self._built = True
@property
def total(self):
return self._total if self._built else None
@property
def count(self):
return self._count if self._built else None
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the element-wise mean.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
values = math_ops.cast(values, self._dtype)
if not self._built:
self._build(values.shape)
elif values.shape != self._shape:
raise ValueError('MeanTensor input values must always have the same '
'shape. Expected shape (set during the first call): {}. '
'Got: {}'.format(self._shape, values.shape))
num_values = array_ops.ones_like(values)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
num_values = math_ops.multiply(num_values, sample_weight)
values = math_ops.multiply(values, sample_weight)
update_total_op = self._total.assign_add(values)
with ops.control_dependencies([update_total_op]):
return self._count.assign_add(num_values)
def result(self):
if not self._built:
raise ValueError(
'MeanTensor does not have any result yet. Please call the MeanTensor '
'instance or use `.update_state(value)` before retrieving the result.'
)
return math_ops.div_no_nan(self.total, self.count)
def reset_states(self):
if self._built:
K.batch_set_value(
[(v, np.zeros(self._shape.as_list())) for v in self.variables])
@keras_export('keras.metrics.BinaryCrossentropy')
class BinaryCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are only two
label classes (0 and 1).
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
smoothed, meaning the confidence on label values is relaxed,
e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
label `0` and `0.9` for label `1`.
Standalone usage:
>>> m = tf.keras.metrics.BinaryCrossentropy()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
0.81492424
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.9162905
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.BinaryCrossentropy()])
```
"""
def __init__(self,
name='binary_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.metrics.CategoricalCrossentropy')
class CategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are multiple
label classes (2 or more). Here we assume that labels are given as a `one_hot`
representation, e.g., when label values are [2, 0, 1],
`y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
smoothed, meaning the confidence on label values is relaxed, e.g.
`label_smoothing=0.2` means that we will use a value of `0.1` for label
`0` and `0.9` for label `1`.
Standalone usage:
>>> # EPSILON = 1e-7, y = y_true, y` = y_pred
>>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
>>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(y'), axis = -1)
>>> # = -((log 0.95), (log 0.1))
>>> # = [0.051, 2.302]
>>> # Reduced xent = (0.051 + 2.302) / 2
>>> m = tf.keras.metrics.CategoricalCrossentropy()
>>> m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result().numpy()
1.1769392
>>> m.reset_states()
>>> m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> m.result().numpy()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalCrossentropy()])
```
"""
def __init__(self,
name='categorical_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.metrics.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
Use this crossentropy metric when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` metric.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `# classes` floating point values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the metric is
computed.
Standalone usage:
>>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
>>> # logits = log(y_pred)
>>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
>>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(softmax), 1)
>>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
>>> # [-2.3026, -0.2231, -2.3026]]
>>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
>>> # xent = [0.0513, 2.3026]
>>> # Reduced xent = (0.0513 + 2.3026) / 2
>>> m = tf.keras.metrics.SparseCategoricalCrossentropy()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result().numpy()
1.1769392
>>> m.reset_states()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> m.result().numpy()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
```
"""
def __init__(self,
name='sparse_categorical_crossentropy',
dtype=None,
from_logits=False,
axis=-1):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
axis=axis)
class SumOverBatchSize(Reduce):
"""Computes the weighted sum over batch size of the given values.
For example, if values is [1, 3, 5, 7] then the metric value is 4.
If the weights were specified as [1, 1, 0, 0] then the value would be 1.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as sum
over batch size which is an idempotent operation that simply divides `total`
by `count`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
"""
def __init__(self, name='sum_over_batch_size', dtype=None):
super(SumOverBatchSize, self).__init__(
reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
name=name,
dtype=dtype)
class SumOverBatchSizeMetricWrapper(SumOverBatchSize):
"""Wraps a function with the `SumOverBatchSizeMetricWrapper` metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `SumOverBatchSizeMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
return super(SumOverBatchSizeMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if is_tensor_or_variable(v) else v
base_config = super(SumOverBatchSizeMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def accuracy(y_true, y_pred):
[y_pred, y_true], _ = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true])
y_pred.shape.assert_is_compatible_with(y_true.shape)
if y_true.dtype != y_pred.dtype:
y_pred = math_ops.cast(y_pred, y_true.dtype)
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.binary_accuracy')
@dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
"""Calculates how often predictions matches binary labels.
Standalone usage:
>>> y_true = [[1], [1], [0], [0]]
>>> y_pred = [[1], [1], [0], [0]]
>>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
>>> assert m.shape == (4,)
>>> m.numpy()
array([1., 1., 1., 1.], dtype=float32)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
threshold: (Optional) Float representing the threshold for deciding whether
prediction values are 1 or 0.
Returns:
Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
threshold = math_ops.cast(threshold, y_pred.dtype)
y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
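# Illustrative note: with the default threshold of 0.5, predictions such as
# [[0.6], [0.4]] are first binarized to [[1.], [0.]], so
# binary_accuracy([[1], [0]], [[0.6], [0.4]]) evaluates to [1., 1.].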
@keras_export('keras.metrics.categorical_accuracy')
@dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions matches one-hot labels.
Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)
You can provide logits of classes as `y_pred`, since the argmax of
logits and probabilities is the same.
Args:
y_true: One-hot ground truth values.
y_pred: The prediction values.
Returns:
Categorical accuracy values.
"""
return math_ops.cast(
math_ops.equal(
math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)),
K.floatx())
@keras_export('keras.metrics.sparse_categorical_accuracy')
@dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions matches integer labels.
Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)
You can provide logits of classes as `y_pred`, since the argmax of
logits and probabilities is the same.
Args:
y_true: Integer ground truth values.
y_pred: The prediction values.
Returns:
Sparse categorical accuracy values.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = ops.convert_to_tensor_v2_with_dispatch(y_true)
y_pred_rank = y_pred.shape.ndims
y_true_rank = y_true.shape.ndims
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
K.int_shape(y_true)) == len(K.int_shape(y_pred))):
y_true = array_ops.squeeze(y_true, [-1])
y_pred = math_ops.argmax(y_pred, axis=-1)
# If the predicted output and actual output types don't match, force cast them
# to match.
if K.dtype(y_pred) != K.dtype(y_true):
y_pred = math_ops.cast(y_pred, K.dtype(y_true))
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
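# Illustrative note: for y_true of shape (2, 1), e.g. [[2], [1]], and y_pred of
# shape (2, 3), both ranks equal 2, so y_true is squeezed to shape (2,) before
# being compared against argmax(y_pred, axis=-1).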
@keras_export('keras.metrics.top_k_categorical_accuracy')
@dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often targets are in the top `K` predictions.
Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)
Args:
y_true: The ground truth values.
y_pred: The prediction values.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
Returns:
Top K categorical accuracy value.
"""
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), K.floatx())
@keras_export('keras.metrics.sparse_top_k_categorical_accuracy')
@dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often integer targets are in the top `K` predictions.
Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
... y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
Returns:
Sparse top K categorical accuracy value.
"""
y_pred_rank = ops.convert_to_tensor_v2_with_dispatch(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor_v2_with_dispatch(y_true).shape.ndims
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = array_ops.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
y_true = array_ops.reshape(y_true, [-1])
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), K.floatx())
def cosine_proximity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Args:
y_true: The ground truth values.
y_pred: The prediction values.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
Returns:
Cosine similarity value.
"""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return math_ops.reduce_sum(y_true * y_pred, axis=axis)
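# Illustrative check: for y_true = [[0., 1.]] and y_pred = [[1., 1.]],
# l2-normalizing gives [[0., 1.]] and [[0.70710678, 0.70710678]], so the
# similarity along axis=-1 is 0. * 0.70710678 + 1. * 0.70710678 ~= 0.7071.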
# Aliases
acc = ACC = accuracy
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine_similarity = cosine_proximity
log_cosh = logcosh
def clone_metric(metric):
"""Returns a clone of the metric if stateful, otherwise returns it as is."""
if isinstance(metric, Metric):
with ops.init_scope():
return metric.__class__.from_config(metric.get_config())
return metric
def clone_metrics(metrics):
"""Clones the given metric list/dict."""
return nest.map_structure(clone_metric, metrics)
@keras_export('keras.metrics.serialize')
def serialize(metric):
"""Serializes metric function or `Metric` instance.
Arguments:
metric: A Keras `Metric` instance or a metric function.
Returns:
Metric configuration dictionary.
"""
return serialize_keras_object(metric)
@keras_export('keras.metrics.deserialize')
def deserialize(config, custom_objects=None):
"""Deserializes a serialized metric class/function instance.
Arguments:
config: Metric configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras `Metric` instance or a metric function.
"""
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='metric function')
@keras_export('keras.metrics.get')
def get(identifier):
"""Retrieves a Keras metric as a `function`/`Metric` class instance.
The `identifier` may be the string name of a metric function or class.
>>> metric = tf.keras.metrics.get("categorical_crossentropy")
>>> type(metric)
<class 'function'>
>>> metric = tf.keras.metrics.get("CategoricalCrossentropy")
>>> type(metric)
<class '...tensorflow.python.keras.metrics.CategoricalCrossentropy'>
You can also specify the `config` of the metric to this function by passing a
dict containing `class_name` and `config` as an identifier. Also note that the
`class_name` must map to a `Metric` class.
>>> identifier = {"class_name": "CategoricalCrossentropy",
... "config": {"from_logits": True}}
>>> metric = tf.keras.metrics.get(identifier)
>>> type(metric)
<class '...tensorflow.python.keras.metrics.CategoricalCrossentropy'>
Arguments:
identifier: A metric identifier. One of `None`, a string name of a metric
function/class, a metric configuration dictionary, a metric function, or
a metric class instance.
Returns:
A Keras metric as a `function`/ `Metric` class instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError(
'Could not interpret metric function identifier: {}'.format(identifier))
def is_built_in(cls):
return cls.__module__ == Metric.__module__
| apache-2.0 |
gacarrillor/QGIS | python/plugins/processing/algs/qgis/ui/RasterCalculatorWidgets.py | 25 | 11647 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterCalculatorWidgets.py
---------------------
Date : November 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2016'
__copyright__ = '(C) 2016, Victor Olaya'
import os
from functools import partial
import re
import json
from qgis.utils import iface
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QTextCursor
from qgis.PyQt.QtWidgets import (QLineEdit, QPushButton, QLabel,
QComboBox, QSpacerItem, QSizePolicy,
QListWidgetItem)
from qgis.core import (QgsProcessingUtils,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingOutputRasterLayer,
QgsProject)
from processing.gui.wrappers import WidgetWrapper, DIALOG_STANDARD, DIALOG_BATCH
from processing.gui.BatchInputSelectionPanel import BatchInputSelectionPanel
from processing.tools import dataobjects
from processing.tools.system import userFolder
from processing.gui.wrappers import InvalidParameterValue
from qgis.analysis import QgsRasterCalculatorEntry, QgsRasterCalcNode
pluginPath = os.path.dirname(__file__)
WIDGET_ADD_NEW, BASE_ADD_NEW = uic.loadUiType(
os.path.join(pluginPath, 'AddNewExpressionDialog.ui'))
class AddNewExpressionDialog(BASE_ADD_NEW, WIDGET_ADD_NEW):
def __init__(self, expression):
super(AddNewExpressionDialog, self).__init__()
self.setupUi(self)
self.name = None
self.expression = None
self.txtExpression.setPlainText(expression)
self.buttonBox.rejected.connect(self.cancelPressed)
self.buttonBox.accepted.connect(self.okPressed)
def cancelPressed(self):
self.close()
def okPressed(self):
self.name = self.txtName.text()
self.expression = self.txtExpression.toPlainText()
self.close()
WIDGET_DLG, BASE_DLG = uic.loadUiType(
os.path.join(pluginPath, 'PredefinedExpressionDialog.ui'))
class PredefinedExpressionDialog(BASE_DLG, WIDGET_DLG):
def __init__(self, expression, options):
super(PredefinedExpressionDialog, self).__init__()
self.setupUi(self)
self.filledExpression = None
self.options = options
self.expression = expression
self.variables = set(re.findall(r'\[.*?\]', expression))
self.comboBoxes = {}
for variable in self.variables:
label = QLabel(variable[1:-1])
combo = QComboBox()
for opt in self.options.keys():
combo.addItem(opt)
self.comboBoxes[variable] = combo
self.groupBox.layout().addWidget(label)
self.groupBox.layout().addWidget(combo)
verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.groupBox.layout().addItem(verticalSpacer)
self.buttonBox.rejected.connect(self.cancelPressed)
self.buttonBox.accepted.connect(self.okPressed)
def cancelPressed(self):
self.close()
def okPressed(self):
self.filledExpression = self.expression
for name, combo in self.comboBoxes.items():
self.filledExpression = self.filledExpression.replace(name,
self.options[combo.currentText()])
self.close()
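# Illustrative sketch (not part of the original file): the substitution that
# okPressed() performs, with hypothetical option names.
def _example_fill_expression():
    expression = '([NIR] - [Red]) / ([NIR] + [Red])'
    options = {'band 4': 'raster@4', 'band 3': 'raster@3'}
    chosen = {'[NIR]': 'band 4', '[Red]': 'band 3'}  # combo box selections
    for variable, option in chosen.items():
        expression = expression.replace(variable, options[option])
    return expression  # '(raster@4 - raster@3) / (raster@4 + raster@3)'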
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'RasterCalculatorWidget.ui'))
class ExpressionWidget(BASE, WIDGET):
_expressions = {"NDVI": "([NIR] - [Red]) / ([NIR] + [Red])"}
def __init__(self, options):
super(ExpressionWidget, self).__init__(None)
self.setupUi(self)
self.setList(options)
def doubleClicked(item):
self.text.insertPlainText('"{}"'.format(self.options[item.text()]))
def addButtonText(text):
if any(c for c in text if c.islower()):
self.text.insertPlainText(" {}()".format(text))
self.text.moveCursor(QTextCursor.PreviousCharacter, QTextCursor.MoveAnchor)
else:
self.text.insertPlainText(" {} ".format(text))
        buttons = [b for b in self.buttonsGroupBox.children() if isinstance(b, QPushButton)]
for button in buttons:
button.clicked.connect(partial(addButtonText, button.text()))
self.listWidget.itemDoubleClicked.connect(doubleClicked)
self.expressions = {}
if os.path.exists(self.expsFile()):
with open(self.expsFile()) as f:
self.expressions.update(json.load(f))
self.expressions.update(self._expressions)
self.fillPredefined()
self.buttonAddPredefined.clicked.connect(self.addPredefined)
self.buttonSavePredefined.clicked.connect(self.savePredefined)
self.text.textChanged.connect(self.expressionValid)
def expressionValid(self):
errorString = ''
testNode = QgsRasterCalcNode.parseRasterCalcString(self.text.toPlainText(), errorString)
if not self.text.toPlainText():
self.expressionErrorLabel.setText(self.tr('Expression is empty'))
self.expressionErrorLabel.setStyleSheet("QLabel { color: black; }")
return False
if testNode:
self.expressionErrorLabel.setText(self.tr('Expression is valid'))
self.expressionErrorLabel.setStyleSheet("QLabel { color: green; font-weight: bold; }")
return True
self.expressionErrorLabel.setText(self.tr('Expression is not valid ') + errorString)
self.expressionErrorLabel.setStyleSheet("QLabel { color : red; font-weight: bold; }")
return False
def expsFile(self):
return os.path.join(userFolder(), 'rastercalcexpressions.json')
def addPredefined(self):
expression = self.expressions[self.comboPredefined.currentText()]
dlg = PredefinedExpressionDialog(expression, self.options)
dlg.exec_()
if dlg.filledExpression:
self.text.setPlainText(dlg.filledExpression)
def savePredefined(self):
exp = self.text.toPlainText()
used = [v for v in self.options.values() if v in exp]
for i, v in enumerate(used):
exp = exp.replace(v, f'[{chr(97 + i)}]')
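        # illustrative note (not part of the original file): given
        #   exp = '("dem@1" - "dem@2") / "mask@1"'
        # with those three layer references in `used`, the loop above
        # rewrites it to '([a] - [b]) / [c]' before offering it for saving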
dlg = AddNewExpressionDialog(exp)
dlg.exec_()
if dlg.name:
self.expressions[dlg.name] = dlg.expression
with open(self.expsFile(), "w") as f:
f.write(json.dumps(self.expressions))
def fillPredefined(self):
self.comboPredefined.clear()
for expression in self.expressions:
self.comboPredefined.addItem(expression)
def setList(self, options):
self.options = options
self.listWidget.clear()
entries = QgsRasterCalculatorEntry.rasterEntries()
def _find_source(name):
for entry in entries:
if entry.ref == name:
return entry.raster.source()
return ''
for name in options.keys():
item = QListWidgetItem(name, self.listWidget)
tooltip = _find_source(name)
if tooltip:
item.setData(Qt.ToolTipRole, tooltip)
self.listWidget.addItem(item)
def setValue(self, value):
self.text.setPlainText(value)
def value(self):
return self.text.toPlainText()
class ExpressionWidgetWrapper(WidgetWrapper):
def _panel(self, options):
return ExpressionWidget(options)
def _get_options(self):
entries = QgsRasterCalculatorEntry.rasterEntries()
options = {}
for entry in entries:
options[entry.ref] = entry.ref
return options
def createWidget(self):
if self.dialogType == DIALOG_STANDARD:
if iface is not None and iface.layerTreeView() is not None and iface.layerTreeView().layerTreeModel() is not None:
iface.layerTreeView().layerTreeModel().dataChanged.connect(self.refresh)
return self._panel(self._get_options())
elif self.dialogType == DIALOG_BATCH:
return QLineEdit()
else:
layers = self.dialog.getAvailableValuesOfType([QgsProcessingParameterRasterLayer], [QgsProcessingOutputRasterLayer])
options = {self.dialog.resolveValueDescription(lyr): "{}@1".format(self.dialog.resolveValueDescription(lyr)) for lyr in layers}
self.widget = self._panel(options)
return self.widget
def refresh(self, *args):
self.widget.setList(self._get_options())
def setValue(self, value):
if self.dialogType == DIALOG_STANDARD:
pass # TODO
elif self.dialogType == DIALOG_BATCH:
return self.widget.setText(value)
else:
self.widget.setValue(value)
def value(self):
if self.dialogType == DIALOG_STANDARD:
return self.widget.value()
elif self.dialogType == DIALOG_BATCH:
return self.widget.text()
else:
return self.widget.value()
class LayersListWidgetWrapper(WidgetWrapper):
def createWidget(self):
if self.dialogType == DIALOG_BATCH:
widget = BatchInputSelectionPanel(self.parameterDefinition(), self.row, self.col, self.dialog)
widget.valueChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
return widget
else:
return None
def setValue(self, value):
if self.dialogType == DIALOG_BATCH:
return self.widget.setText(value)
def value(self):
if self.dialogType == DIALOG_STANDARD:
if self.param.datatype == dataobjects.TYPE_FILE:
return self.param.setValue(self.widget.selectedoptions)
else:
if self.param.datatype == dataobjects.TYPE_RASTER:
options = QgsProcessingUtils.compatibleRasterLayers(QgsProject.instance(), False)
elif self.param.datatype == dataobjects.TYPE_VECTOR_ANY:
options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [], False)
else:
options = QgsProcessingUtils.compatibleVectorLayers(QgsProject.instance(), [self.param.datatype], False)
return [options[i] for i in self.widget.selectedoptions]
elif self.dialogType == DIALOG_BATCH:
return self.widget.getText()
else:
options = self._getOptions()
values = [options[i] for i in self.widget.selectedoptions]
if len(values) == 0 and not self.parameterDefinition().flags() & QgsProcessingParameterDefinition.FlagOptional:
raise InvalidParameterValue()
return values
| gpl-2.0 |
jjdmol/LOFAR | LCU/checkhardware/updatePVSS.py | 1 | 21517 | #!/usr/bin/python
#
# read last test log file (.csv)
# and send test result to PVSS,
# and write to PVSS log file
#
# P.Donker
import sys
import os
from time import sleep
libPath = '/opt/stationtest/lib'
sys.path.insert(0, libPath)
from general_lib import *
from lofar_lib import *
args = dict()
logdir = ""
logger = 0
nLBL = 0
nLBH = 0
nHBA = 0
nRSP = 0
# PVSS states
State = dict({'OFF':0, 'OPERATIONAL':1, 'MAINTENANCE':2, 'TEST':3, 'SUSPICIOUS':4, 'BROKEN':5})
def main():
global args, logdir, logger, nRSP, nLBL, nLBH, nHBA
getArguments()
logdir = getLogDir()
ID, nRSP, nTBB, nLBL, nLBH, nHBA, HBA_SPLIT = readStationConfig()
logger = cPVSSLogger(logdir)
if args.has_key('RESET'):
resetPVSS(state=0)
if args.has_key('NO_UPDATE'):
print "skip PVSS update"
addManualDataToPVSS()
# read last log file from checkhardware
testfilename = '%s_StationTest.csv' %(getHostName())
fullFilename = os.path.join(logdir, testfilename)
if args.has_key('FILE'):
fullFilename = args.get('FILE')
try:
f = open(fullFilename, 'r')
except IOError:
print "file not found %s" %(fullFilename)
return
testdata = f.readlines()
f.close()
bad_lba, bad_hba = addDataToPVSS(testdata)
addDataToBadRcuFile(bad_lba, bad_hba)
# print help screen
def printHelp():
print "----------------------------------------------------------------------------"
print "Usage of arguments"
print "Output of last stationcheck is always send to pvss also the bad_rcu file is made"
print "-h : this help screen"
print "-reset[=type] : set all state fields to ok for type if given"
print " type = all | lba | lbl | lbh | hba (all=default)"
print "-no_update : skip pvss update"
print "-test : do not send to PVSS"
print "-file=[full filename]: filename to use"
print ""
#print "-L=x : x = flag level"
print " NEXT KEYS ARE ONLY USED FOR HBA ERRORS"
print "-S=x : rf, flag only if deviation greater than x dB"
print "-N=x,y,z : noise, flag only if available more than x% of time (x=0..100)"
print " or available more than y% of time and fluctuation > z dB"
print "-J=x,y,z : jitter, flag only if available more than x% of time (x=0..100)"
print " or available more than y% of time and fluctuation > z dB"
print "-SN : do not flag summator noise"
print "-SP : do not flag spurious signals"
print "-O : do not flag oscillating signals"
print "-M=x : modem, flag only if error in x elements (x=0..16)"
print "-E : do not flag results of element test"
print " NEXT KEYS ARE ONLY USED FOR LBA ERRORS"
print "-LBLS=x : lbl rf, flag only if deviation greater than x dB"
print "-LBLN=x,y,z : noise, flag only if available more than x% of time (x=0..100)"
print " or available more than y% of time and fluctuation > z dB"
print "-LBLJ=x,y,z : jitter, flag only if available more than x% of time (x=0..100)"
print " or available more than y% of time and fluctuation > z dB"
print "-LBHS=x : lbh rf, flag only if deviation greater than x dB"
print "-LBHN=x,y,z : noise, flag only if available more than x% of time (x=0..100)"
print " or available more than y% of time and fluctuation > z dB"
print "-LBHJ=x,y,z : jitter, flag only if available more than x% of time (x=0..100)"
print " or available more than y% of time and fluctuation > z dB"
# get command line arguments
def getArguments():
global args
for i in range(len(sys.argv)):
if sys.argv[i][0] == '-':
if sys.argv[i].find('=') != -1:
valpos = sys.argv[i].find('=')
key = sys.argv[i][1:valpos].upper()
val = sys.argv[i][valpos+1:].split(',')
if len(val) > 1:
args[key] = val
else:
args[key] = val[0]
else:
args[sys.argv[i][1:].upper()]='-'
if args.has_key('H') or args.has_key('HELP'):
printHelp()
sys.exit()
return
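# illustrative note (not part of the original script): a call such as
#   ./updatePVSS.py -N=5,60,3 -reset=lba -test
# leaves the global dict as
#   args = {'N': ['5', '60', '3'], 'RESET': 'lba', 'TEST': '-'}
# keys are upper-cased, '='-values are split on ',', bare flags map to '-'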
# get logdir from configuration file
def getLogDir():
logdir = ""
# look for log directory
f = open("/opt/stationtest/checkHardware.conf", 'r')
data = f.readlines()
f.close()
for line in data:
if line.find('log-dir-local') != -1:
key, logdir = line.strip().split('=')
return(logdir)
# send comment, key and value to PVSS and write to file
def sendToPVSS(comment, pvss_key, value):
global logger, args
if args.has_key('NO_UPDATE'):
return("")
if len(comment) > 0:
comment = 'stationtest::'+comment
else:
comment = 'stationtest'
arguments = '%s %s %d' %(comment, pvss_key, value)
logger.addLine(arguments[11:])
if args.has_key('TEST'):
print arguments
else:
response = sendCmd('setObjectState', arguments)
sleep(0.2)
return(response)
return("")
# set all antenna info to ok
def resetPVSS(state=0):
global args, libPath, State, nRSP, nLBL, nLBH, nHBA
reset_type = args.get('RESET','ALL').upper()
filename = "reset_pvss.log"
full_filename = os.path.join(libPath, filename)
f = open(full_filename, 'w')
if reset_type == 'ALL':
for rcu in range(nRSP*8):
board = int(rcu / 8)
rack = int(board / 4)
cabinet = int(rack / 2)
f.write("LOFAR_PIC_Cabinet%d_Subrack%d_RSPBoard%d_RCU%d %d\n" %(cabinet, rack, board, rcu, state))
if reset_type in ('ALL','LBA','LBH'):
for ant in range(nLBH):
f.write("LOFAR_PIC_LBA%03d %d\n" %(ant, state))
if reset_type in ('ALL','LBA','LBL'):
for ant in range(nLBL):
f.write("LOFAR_PIC_LBA%03d %d\n" %(ant+48, state))
if reset_type in ('ALL','HBA'):
for tile in range(nHBA):
f.write("LOFAR_PIC_HBA%02d %d\n" %(tile, state))
for elem in range(16):
f.write("LOFAR_PIC_HBA%02d.element%02d %d\n" %(tile, elem, state))
f.write("LOFAR_PIC_HBA%02d.element%02d.comm %d\n" %(tile, elem, state))
f.write("LOFAR_PIC_HBA%02d.element%02d.X %d\n" %(tile, elem, state))
f.write("LOFAR_PIC_HBA%02d.element%02d.Y %d\n" %(tile, elem, state))
f.close()
if not args.has_key('TEST'):
sendCmd("setObjectState", "stationtest:reset %s" %(full_filename))
sleep(5.0)
# add manually filled list of bad antennas to PVSS
def addManualDataToPVSS():
global State, logdir
filename = "bad_antenna_list.txt"
full_filename = "/globalhome/log/bad_antenna_list.txt"
try:
f = open(full_filename, 'r')
except IOError:
print "%s not found" %(filename)
return
    data = f.readlines()
f.close()
for line in data:
if line[0] == '#':
continue
if line.upper().find(getHostName()) > -1:
bad_antenna_list = line.strip().split(' ')[1:]
for ant in bad_antenna_list:
part = ant[:3].upper()
part_nr = int(ant[3:])
if part == 'LBA':
sendToPVSS("manualy-marked", "LOFAR_PIC_LBA%03d" %(part_nr), State['BROKEN'])
if part == 'HBA':
sendToPVSS("manualy-marked", "LOFAR_PIC_HBA%02d" %(part_nr), State['BROKEN'])
return
# add result data from checkhardware to PVSS
def addDataToPVSS(data):
global args
global State
bad_lba = dict()
bad_hba = dict()
RFrefX = 0.0
RFrefY = 0.0
for line in data:
if line[0] == '#':
continue
keyinfo = dict()
info = line.split(',')
#print info
date = info[0]
part = info[1]
partNr = '---'
if info[2] != '---':
partNr = int(info[2])
msgType = info[3].strip()
for i in range(4,len(info)):
if info[i].find('=') != -1:
key, valstr = info[i].split('=')
vallist = valstr.split()
if len(vallist) == 1:
keyinfo[key] = vallist[0]
elif len(vallist) > 1:
keyinfo[key] = vallist
else:
keyinfo[info[i]] = '-'
if part == 'LBL':
lban_limits = args.get('LBLN','0.0')
lbaj_limits = args.get('LBLJ','0.0')
lbas_limit = args.get('LBLS','0.0')
elif part == 'LBH':
lban_limits = args.get('LBHN','0.0')
lbaj_limits = args.get('LBHJ','0.0')
lbas_limit = args.get('LBHS','0.0')
if part in ('LBL', 'LBH'):
if msgType == 'TESTSIGNAL':
RFrefX = float(keyinfo.get('SIGNALX','0.0'))
RFrefY = float(keyinfo.get('SIGNALY','0.0'))
if msgType == 'LOW_NOISE':
if float(keyinfo.get('Xproc','0.0')) >= 100.0 or float(keyinfo.get('Yproc','0.0')) >= 100.0:
sendToPVSS("low-noise", "LOFAR_PIC_LBA%03d" %(partNr), State['BROKEN'])
bad_lba[partNr] = 1
elif msgType == 'HIGH_NOISE':
proc_limit_2 = 0.0
diff_limit = 0.0
if len(lban_limits) > 1:
proc_limit_1 = float(lban_limits[0])
proc_limit_2 = float(lban_limits[1])
diff_limit = float(lban_limits[2])
else:
proc_limit_1 = float(lban_limits)
if float(keyinfo.get('Xproc','0.0')) >= proc_limit_1 or float(keyinfo.get('Yproc','0.0')) >= proc_limit_1:
if ((float(keyinfo.get('Xproc','0.0')) < proc_limit_2 and (float(keyinfo.get('Xval','0.0')) - float(keyinfo.get('Xref','0.0'))) < diff_limit) and
(float(keyinfo.get('Yproc','0.0')) < proc_limit_2 and (float(keyinfo.get('Yval','0.0')) - float(keyinfo.get('Yref','0.0'))) < diff_limit)):
pass
else:
sendToPVSS("noise", "LOFAR_PIC_LBA%03d" %(partNr), State['BROKEN'])
bad_lba[partNr] = 1
elif msgType == 'JITTER':
proc_limit_2 = 0.0
diff_limit = 0.0
if len(lbaj_limits) > 1:
proc_limit_1 = float(lbaj_limits[0])
proc_limit_2 = float(lbaj_limits[1])
diff_limit = float(lbaj_limits[2])
else:
proc_limit_1 = float(lbaj_limits)
if float(keyinfo.get('Xproc','0.0')) >= proc_limit_1 or float(keyinfo.get('Yproc','0.0')) >= proc_limit_1:
if ((float(keyinfo.get('Xproc','0.0')) < proc_limit_2 and float(keyinfo.get('Xdiff','0.0')) < diff_limit) and
(float(keyinfo.get('Yproc','0.0')) < proc_limit_2 and float(keyinfo.get('Ydiff','0.0')) < diff_limit)):
pass
else:
sendToPVSS("jitter", "LOFAR_PIC_LBA%03d" %(partNr), State['BROKEN'])
bad_lba[partNr] = 1
elif msgType == 'OSCILLATION':
sendToPVSS("oscillating", "LOFAR_PIC_LBA%03d" %(partNr), State['BROKEN'])
bad_lba[partNr] = 1
elif msgType == 'RF_FAIL':
comment = "rf-fail-"
flag = False
X = float(keyinfo.get('X','0.0'))
Y = float(keyinfo.get('Y','0.0'))
if X > 0.0:
if abs(X - RFrefX) > float(lbas_limit):
comment += "X"
flag = True
if Y > 0.0:
if abs(Y - RFrefY) > float(lbas_limit):
comment += "Y"
flag = True
if flag:
#print 'LBL %3.1f (%3.1f) %3.1f (%3.1f)' %(X, RFrefX, Y, RFrefY)
sendToPVSS(comment, "LOFAR_PIC_LBA%03d" %(partNr), State['BROKEN'])
bad_lba[partNr] = 1
elif msgType == 'DOWN':
sendToPVSS("down", "LOFAR_PIC_LBA%03d" %(partNr), State['BROKEN'])
bad_lba[partNr] = 1
if part == 'HBA':
if msgType == 'LOW_NOISE':
if float(keyinfo.get('Xproc','0.0')) >= 100.0 or float(keyinfo.get('Yproc','0.0')) >= 100.0:
sendToPVSS("low-noise", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'HIGH_NOISE':
limits = args.get('N','0.0')
proc_limit_2 = 0.0
diff_limit = 0.0
if len(limits) > 1:
proc_limit_1 = float(limits[0])
proc_limit_2 = float(limits[1])
diff_limit = float(limits[2])
else:
proc_limit_1 = float(limits)
if float(keyinfo.get('Xproc','0.0')) >= proc_limit_1 or float(keyinfo.get('Yproc','0.0')) >= proc_limit_1:
if ((float(keyinfo.get('Xproc','0.0')) < proc_limit_2 and (float(keyinfo.get('Xval','0.0')) - float(keyinfo.get('Xref','0.0'))) < diff_limit) and
(float(keyinfo.get('Yproc','0.0')) < proc_limit_2 and (float(keyinfo.get('Yval','0.0')) - float(keyinfo.get('Yref','0.0'))) < diff_limit)):
pass
else:
sendToPVSS("noise", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'JITTER':
limits = args.get('J','0.0')
proc_limit_2 = 0.0
diff_limit = 0.0
if len(limits) > 1:
proc_limit_1 = float(limits[0])
proc_limit_2 = float(limits[1])
diff_limit = float(limits[2])
else:
proc_limit_1 = float(limits)
if float(keyinfo.get('Xproc','0.0')) >= proc_limit_1 or float(keyinfo.get('Yproc','0.0')) >= proc_limit_1:
if ((float(keyinfo.get('Xproc','0.0')) < proc_limit_2 and float(keyinfo.get('Xdiff','0.0')) < diff_limit) and
(float(keyinfo.get('Yproc','0.0')) < proc_limit_2 and float(keyinfo.get('Ydiff','0.0')) < diff_limit)):
pass
else:
sendToPVSS("jitter", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'OSCILLATION':
if not args.has_key('O'):
sendToPVSS("oscillating", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'C_SUMMATOR':
sendToPVSS("modem-fail", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'SUMMATOR_NOISE':
if not args.has_key('SN'):
sendToPVSS("summator-noise", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'SPURIOUS':
if not args.has_key('SP'):
sendToPVSS("spurious-signals", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'RF_FAIL':
flag = False
limit = float(args.get('S','0'))
X = keyinfo.get('X',[])
Y = keyinfo.get('Y',[])
if len(X):
if abs(float(X[0]) - float(X[2])) > limit:
flag = True
if len(Y):
if abs(float(Y[0]) - float(Y[2])) > limit:
flag = True
if flag:
sendToPVSS("rf-tile-fail", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
elif msgType == 'E_FAIL':
                if not args.has_key('E'):
max_errors = 2
modem_errors = 0
LNX_errors = 0
LNY_errors = 0
RFX_errors = 0
RFY_errors = 0
# check first total number of errors in tile
for elem_nr in range(1,17,1):
if keyinfo.has_key('M%d' %(elem_nr)):
modem_errors += 1
if keyinfo.has_key('LNX%d' %(elem_nr)):
LNX_errors += 1
if keyinfo.has_key('LNY%d' %(elem_nr)):
LNY_errors += 1
if keyinfo.has_key('X%d' %(elem_nr)):
RFX_errors += 1
if keyinfo.has_key('Y%d' %(elem_nr)):
RFY_errors += 1
send_tile_errors = 0
for elem_nr in range(1,17,1):
send_elem_errors = 0
if modem_errors > max_errors and keyinfo.has_key('M%d' %(elem_nr)):
sendToPVSS("rf-fail", "LOFAR_PIC_HBA%02d.element%02d.comm" %(partNr, elem_nr-1), State['BROKEN'])
send_elem_errors += 1
comment = ""
if (RFX_errors > max_errors) and keyinfo.has_key('X%d' %(elem_nr)):
comment += "rf-fail&"
if (LNX_errors > max_errors) and keyinfo.has_key('LNX%d' %(elem_nr)):
comment += "low-noise&"
if keyinfo.has_key('HNX%d' %(elem_nr)) or keyinfo.has_key('JX%d' %(elem_nr)):
comment += "noise&"
if len(comment) > 0:
sendToPVSS(comment[:-1], "LOFAR_PIC_HBA%02d.element%02d.X" %(partNr, elem_nr-1), State['BROKEN'])
send_elem_errors += 1
comment = ""
if (RFY_errors > max_errors) and keyinfo.has_key('Y%d' %(elem_nr)):
comment += "rf-fail&"
if (LNY_errors > max_errors) and keyinfo.has_key('LNY%d' %(elem_nr)):
comment += "low-noise&"
if keyinfo.has_key('HNY%d' %(elem_nr)) or keyinfo.has_key('JY%d' %(elem_nr)):
comment += "noise&"
if len(comment) > 0:
sendToPVSS(comment[:-1], "LOFAR_PIC_HBA%02d.element%02d.Y" %(partNr, elem_nr-1), State['BROKEN'])
send_elem_errors += 1
if send_elem_errors > 0:
sendToPVSS("rf-fail", "LOFAR_PIC_HBA%02d.element%02d" %(partNr, elem_nr-1), State['BROKEN'])
send_tile_errors += 1
if send_tile_errors > 0:
sendToPVSS("", "LOFAR_PIC_HBA%02d" %(partNr), State['BROKEN'])
bad_hba[partNr] = 1
return (list(bad_lba), list(bad_hba))
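# illustrative note (not part of the original script): a checkhardware line
# of the form
#   20140416,LBH,005,HIGH_NOISE,Xproc=100.0,Yval=82.5 3.1
# is parsed above into part='LBH', partNr=5, msgType='HIGH_NOISE' and
# keyinfo={'Xproc': '100.0', 'Yval': ['82.5', '3.1']}; single values stay
# strings, space-separated values become lists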
# write bad RCUs to file in logdir
def addDataToBadRcuFile(bad_lba, bad_hba):
global nLBL
global nLBH
# add bad rcus to file
filename = '%s_bad_rcus.txt' %(getHostName())
full_filename = os.path.join(logdir, filename)
f = open(full_filename, 'w')
lbl = ""
lbh = ""
for ant in sorted(bad_lba):
if (nLBL > 0) and (ant > nLBH):
lbl += "%d," %((ant-nLBL)*2)
lbl += "%d," %((ant-nLBL)*2+1)
else:
lbh += "%d," %(ant*2)
lbh += "%d," %(ant*2+1)
if len(lbl):
lbl = lbl[:-1]
lbl = "LBL=[" + lbl + "]\n"
f.write(lbl)
if len(lbh):
lbh = lbh[:-1]
lbh = "LBH=[" + lbh + "]\n"
f.write(lbh)
hba = ""
for tile in sorted(bad_hba):
hba += "%d," %(tile*2)
hba += "%d," %(tile*2+1)
if len(hba):
hba = hba[:-1]
hba = "HBA=[" + hba + "]\n"
f.write(hba)
f.close()
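# illustrative note (not part of the original script): with bad_lba=[5] and
# bad_hba=[2] on a station without LBL antennas, the file contains
#   LBH=[10,11]
#   HBA=[4,5]
# i.e. each bad antenna/tile maps to its X and Y RCU pair (nr*2, nr*2+1)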
if __name__ == "__main__":
main() | gpl-3.0 |
jpshort/odoo | addons/purchase/edi/purchase_order.py | 439 | 9703 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.edi import EDIMixin
PURCHASE_ORDER_LINE_EDI_STRUCT = {
'name': True,
'date_planned': True,
'product_id': True,
'product_uom': True,
'price_unit': True,
'product_qty': True,
# fields used for web preview only - discarded on import
'price_subtotal': True,
}
PURCHASE_ORDER_EDI_STRUCT = {
'company_id': True, # -> to be changed into partner
'name': True,
'partner_ref': True,
'origin': True,
'date_order': True,
'partner_id': True,
#custom: 'partner_address',
'notes': True,
'order_line': PURCHASE_ORDER_LINE_EDI_STRUCT,
#custom: currency_id
# fields used for web preview only - discarded on import
'amount_total': True,
'amount_untaxed': True,
'amount_tax': True,
'state':True,
}
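# Illustrative sketch (not part of the original module): rough shape of a
# document produced by edi_export() under the structs above; every value
# below is hypothetical.
_PURCHASE_ORDER_EDI_EXAMPLE = {
    '__import_model': 'sale.order',  # trans-typed on the supplier side
    '__import_module': 'sale',
    'name': 'PO00042',
    'date_order': '2012-01-31',
    'partner_id': ['db-uuid:res_partner_7', 'Supplier Inc.'],
    'currency': {'code': 'EUR', 'name': 'EUR'},
    'order_line': [{
        '__import_model': 'sale.order.line',
        'name': 'Widget',
        'date_planned': '2012-02-15',
        'product_qty': 4.0,
        'price_unit': 12.5,
    }],
}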
class purchase_order(osv.osv, EDIMixin):
_inherit = 'purchase.order'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Exports a purchase order"""
edi_struct = dict(edi_struct or PURCHASE_ORDER_EDI_STRUCT)
res_company = self.pool.get('res.company')
res_partner_obj = self.pool.get('res.partner')
edi_doc_list = []
for order in records:
# generate the main report
self._edi_generate_report_attachment(cr, uid, order, context=context)
# Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
edi_doc = super(purchase_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
edi_doc.update({
                # force trans-typing to sale.order upon import
'__import_model': 'sale.order',
'__import_module': 'sale',
'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
context=context)[0],
})
if edi_doc.get('order_line'):
for line in edi_doc['order_line']:
line['__import_model'] = 'sale.order.line'
edi_doc_list.append(edi_doc)
return edi_doc_list
def edi_import_company(self, cr, uid, edi_document, context=None):
# TODO: for multi-company setups, we currently import the document in the
# user's current company, but we should perhaps foresee a way to select
# the desired company among the user's allowed companies
self._edi_requires_attributes(('company_id','company_address'), edi_document)
res_partner = self.pool.get('res.partner')
xid, company_name = edi_document.pop('company_id')
# Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
company_address_edi = edi_document.pop('company_address')
company_address_edi['name'] = company_name
company_address_edi['is_company'] = True
company_address_edi['__import_model'] = 'res.partner'
company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway
if company_address_edi.get('logo'):
company_address_edi['image'] = company_address_edi.pop('logo')
company_address_edi['supplier'] = True
partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
# modify edi_document to refer to new partner
partner = res_partner.browse(cr, uid, partner_id, context=context)
partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
edi_document['partner_id'] = partner_edi_m2o
edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
return partner_id
def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
# TODO: refactor into common place for purchase/sale, e.g. into product module
partner_model = self.pool.get('res.partner')
partner = partner_model.browse(cr, uid, partner_id, context=context)
pricelist = partner.property_product_pricelist_purchase
if not pricelist:
pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'purchase', 'list0', context=context)
if not pricelist.currency_id == currency:
# look for a pricelist with the right type and currency, or make a new one
pricelist_type = 'purchase'
product_pricelist = self.pool.get('product.pricelist')
match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
('currency_id','=',currency.id)])
if match_pricelist_ids:
pricelist_id = match_pricelist_ids[0]
else:
pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
'type': pricelist_type,
'currency_id': currency.id,
})
self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
'pricelist_id': pricelist_id})
pricelist = product_pricelist.browse(cr, uid, pricelist_id)
return self.edi_m2o(cr, uid, pricelist, context=context)
def _edi_get_location(self, cr, uid, partner_id, context=None):
partner_model = self.pool.get('res.partner')
partner = partner_model.browse(cr, uid, partner_id, context=context)
location = partner.property_stock_customer
if not location:
location = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_stock', context=context)
return self.edi_m2o(cr, uid, location, context=context)
def edi_import(self, cr, uid, edi_document, context=None):
self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)
#import company as a new partner
partner_id = self.edi_import_company(cr, uid, edi_document, context=context)
# currency for rounding the discount calculations and for the pricelist
res_currency = self.pool.get('res.currency')
currency_info = edi_document.pop('currency')
currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
order_currency = res_currency.browse(cr, uid, currency_id)
partner_ref = edi_document.pop('partner_ref', False)
edi_document['partner_ref'] = edi_document['name']
edi_document['name'] = partner_ref or edi_document['name']
edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)
edi_document['location_id'] = self._edi_get_location(cr, uid, partner_id, context=context)
# discard web preview fields, if present
edi_document.pop('amount_total', None)
edi_document.pop('amount_tax', None)
edi_document.pop('amount_untaxed', None)
edi_document.pop('payment_term', None)
edi_document.pop('order_policy', None)
edi_document.pop('user_id', None)
for order_line in edi_document['order_line']:
self._edi_requires_attributes(('date_planned', 'product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
# original sale order contains unit price and discount, but not final line price
discount = order_line.pop('discount', 0.0)
if discount:
order_line['price_unit'] = res_currency.round(cr, uid, order_currency,
(order_line['price_unit'] * (1 - (discount or 0.0) / 100.0)))
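            # illustrative note (not in the original): e.g. price_unit=100.0
            # with discount=10.0 is imported as price_unit=90.0, rounded in
            # the order currency; the discount key itself is dropped above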
# sale order lines have sequence numbers, not purchase order lines
order_line.pop('sequence', None)
# discard web preview fields, if present
order_line.pop('price_subtotal', None)
return super(purchase_order,self).edi_import(cr, uid, edi_document, context=context)
class purchase_order_line(osv.osv, EDIMixin):
_inherit='purchase.order.line'
| agpl-3.0 |
heeraj123/oh-mainline | vendor/packages/ghettoq/ghettoq/tests/test_redis.py | 17 | 2267 | import unittest
from anyjson import serialize, deserialize
from ghettoq.simple import Connection, Empty
def create_connection(database):
return Connection("redis", host="localhost", database=database)
class TestRedisBackend(unittest.TestCase):
def test_default_database_is_set_correctly(self):
conn1 = create_connection("")
conn2 = create_connection("/")
conn3 = create_connection("")
conn4 = create_connection(None)
self.assertEquals(conn1.database, 0)
self.assertEquals(conn2.database, 0)
self.assertEquals(conn3.database, 0)
self.assertEquals(conn4.database, 0)
def test_database_name_is_set_correctly(self):
conn1 = create_connection("1")
conn2 = create_connection("/2")
conn3 = create_connection(3)
self.assertEquals(conn1.database, 1)
self.assertEquals(conn2.database, 2)
self.assertEquals(conn3.database, 3)
def test_invalid_database_name_raises_AttributeError(self):
self.assertRaises(AttributeError, create_connection, "string")
self.assertRaises(AttributeError, create_connection, "1string")
self.assertRaises(AttributeError, create_connection, "/string")
def test_empty_raises_Empty(self):
conn = create_connection(1)
q = conn.Queue("testing")
self.assertRaises(Empty, q.get)
def test_queue_is_empty_after_purge(self):
conn = create_connection(1)
q = conn.Queue("test_queue")
q.put(serialize({"name": "George Constanza"}))
q.put(serialize({"name": "George Constanza"}))
q.purge()
self.assertRaises(Empty, q.get)
def test_put__get(self):
conn = create_connection(1)
q = conn.Queue("testing")
q.put(serialize({"name": "George Constanza"}))
self.assertEquals(deserialize(q.get()),
{"name": "George Constanza"})
def test_empty_queueset_raises_Empty(self):
conn = create_connection(1)
a, b, c, = conn.Queue("a"), conn.Queue("b"), conn.Queue("c")
queueset = conn.QueueSet(queue.name for queue in (a, b, c))
for queue in a, b, c:
self.assertRaises(Empty, queue.get)
self.assertRaises(Empty, queueset.get)
| agpl-3.0 |
yceruto/django-guardian | guardian/core.py | 23 | 5160 | from __future__ import unicode_literals
from itertools import chain
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
from guardian.compat import get_user_model
class ObjectPermissionChecker(object):
"""
Generic object permissions checker class being the heart of
``django-guardian``.
.. note::
       Once an object has been checked, its permissions are cached and we
       don't hit the database again for further checks on that object. This
       is great for templates, views or other request-based checks (assuming
       we don't have hundreds of permissions on a single object, as we fetch
       all permissions for the checked object).
       On the other hand, if we call ``has_perm`` for perm1/object1, then
       change the permission state and call ``has_perm`` again for the same
       perm1/object1 on the same ObjectPermissionChecker instance, we won't
       see a difference, as the permissions were already fetched and stored
       in the cache dictionary.
"""
def __init__(self, user_or_group=None):
"""
:param user_or_group: should be an ``User``, ``AnonymousUser`` or
``Group`` instance
"""
self.user, self.group = get_identity(user_or_group)
self._obj_perms_cache = {}
def has_perm(self, perm, obj):
"""
Checks if user/group has given permission for object.
:param perm: permission as string, may or may not contain app_label
prefix (if not prefixed, we grab app_label from ``obj``)
:param obj: Django model instance for which permission should be checked
"""
perm = perm.split('.')[-1]
if self.user and not self.user.is_active:
return False
elif self.user and self.user.is_superuser:
return True
return perm in self.get_perms(obj)
def get_perms(self, obj):
"""
Returns list of ``codename``'s of all permissions for given ``obj``.
:param obj: Django model instance for which permission should be checked
"""
User = get_user_model()
ctype = ContentType.objects.get_for_model(obj)
key = self.get_local_cache_key(obj)
        if key not in self._obj_perms_cache:
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.permission.field.related_query_name()
if self.user:
fieldname = '%s__group__%s' % (
group_rel_name,
User.groups.field.related_query_name(),
)
group_filters = {fieldname: self.user}
else:
group_filters = {'%s__group' % group_rel_name: self.group}
if group_model.objects.is_generic():
group_filters.update({
'%s__content_type' % group_rel_name: ctype,
'%s__object_pk' % group_rel_name: obj.pk,
})
else:
group_filters['%s__content_object' % group_rel_name] = obj
if self.user and not self.user.is_active:
return []
elif self.user and self.user.is_superuser:
perms = list(chain(*Permission.objects
.filter(content_type=ctype)
.values_list("codename")))
elif self.user:
model = get_user_obj_perms_model(obj)
related_name = model.permission.field.related_query_name()
user_filters = {'%s__user' % related_name: self.user}
if model.objects.is_generic():
user_filters.update({
'%s__content_type' % related_name: ctype,
'%s__object_pk' % related_name: obj.pk,
})
else:
user_filters['%s__content_object' % related_name] = obj
perms_qs = Permission.objects.filter(content_type=ctype)
# Query user and group permissions separately and then combine
# the results to avoid a slow query
user_perms_qs = perms_qs.filter(**user_filters)
user_perms = user_perms_qs.values_list("codename", flat=True)
group_perms_qs = perms_qs.filter(**group_filters)
group_perms = group_perms_qs.values_list("codename", flat=True)
perms = list(set(chain(user_perms, group_perms)))
else:
perms = list(set(chain(*Permission.objects
.filter(content_type=ctype)
.filter(**group_filters)
.values_list("codename"))))
self._obj_perms_cache[key] = perms
return self._obj_perms_cache[key]
def get_local_cache_key(self, obj):
"""
Returns cache key for ``_obj_perms_cache`` dict.
"""
ctype = ContentType.objects.get_for_model(obj)
return (ctype.id, obj.pk)
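# Illustrative usage sketch (not part of the original module); assumes
# ``joe`` is a User and ``task`` any model instance with object perms:
#
#   checker = ObjectPermissionChecker(joe)
#   checker.has_perm('change_task', task)  # first call fetches all perms
#   checker.has_perm('delete_task', task)  # same object: answered from cache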
| bsd-2-clause |
mrshelly/openerp71313 | openerp/tests/test_fields.py | 28 | 5942 | #
# test cases for fields access, etc.
#
import common
from openerp.osv import fields
class TestRelatedField(common.TransactionCase):
def setUp(self):
super(TestRelatedField, self).setUp()
self.partner = self.registry('res.partner')
self.company = self.registry('res.company')
def test_0_related(self):
""" test an usual related field """
# add a related field test_related_company_id on res.partner
old_columns = self.partner._columns
self.partner._columns = dict(old_columns)
self.partner._columns.update({
'related_company_partner_id': fields.related('company_id', 'partner_id', type='many2one', obj='res.partner'),
})
# find a company with a non-null partner_id
ids = self.company.search(self.cr, self.uid, [('partner_id', '!=', False)], limit=1)
id = ids[0]
# find partners that satisfy [('partner_id.company_id', '=', id)]
company_ids = self.company.search(self.cr, self.uid, [('partner_id', '=', id)])
partner_ids1 = self.partner.search(self.cr, self.uid, [('company_id', 'in', company_ids)])
partner_ids2 = self.partner.search(self.cr, self.uid, [('related_company_partner_id', '=', id)])
self.assertEqual(partner_ids1, partner_ids2)
# restore res.partner fields
self.partner._columns = old_columns
def do_test_company_field(self, field):
# get a partner with a non-null company_id
ids = self.partner.search(self.cr, self.uid, [('company_id', '!=', False)], limit=1)
partner = self.partner.browse(self.cr, self.uid, ids[0])
# check reading related field
self.assertEqual(partner[field], partner.company_id)
# check that search on related field is equivalent to original field
ids1 = self.partner.search(self.cr, self.uid, [('company_id', '=', partner.company_id.id)])
ids2 = self.partner.search(self.cr, self.uid, [(field, '=', partner.company_id.id)])
self.assertEqual(ids1, ids2)
def test_1_single_related(self):
""" test a related field with a single indirection like fields.related('foo') """
# add a related field test_related_company_id on res.partner
# and simulate a _inherits_reload() to populate _all_columns.
old_columns = self.partner._columns
old_all_columns = self.partner._all_columns
self.partner._columns = dict(old_columns)
self.partner._all_columns = dict(old_all_columns)
self.partner._columns.update({
'single_related_company_id': fields.related('company_id', type='many2one', obj='res.company'),
})
self.partner._all_columns.update({
'single_related_company_id': fields.column_info('single_related_company_id', self.partner._columns['single_related_company_id'], None, None, None)
})
self.do_test_company_field('single_related_company_id')
# restore res.partner fields
self.partner._columns = old_columns
self.partner._all_columns = old_all_columns
def test_2_related_related(self):
""" test a related field referring to a related field """
# add a related field on a related field on res.partner
# and simulate a _inherits_reload() to populate _all_columns.
old_columns = self.partner._columns
old_all_columns = self.partner._all_columns
self.partner._columns = dict(old_columns)
self.partner._all_columns = dict(old_all_columns)
self.partner._columns.update({
'single_related_company_id': fields.related('company_id', type='many2one', obj='res.company'),
'related_related_company_id': fields.related('single_related_company_id', type='many2one', obj='res.company'),
})
self.partner._all_columns.update({
'single_related_company_id': fields.column_info('single_related_company_id', self.partner._columns['single_related_company_id'], None, None, None),
'related_related_company_id': fields.column_info('related_related_company_id', self.partner._columns['related_related_company_id'], None, None, None)
})
self.do_test_company_field('related_related_company_id')
# restore res.partner fields
self.partner._columns = old_columns
self.partner._all_columns = old_all_columns
def test_3_read_write(self):
""" write on a related field """
# add a related field test_related_company_id on res.partner
old_columns = self.partner._columns
self.partner._columns = dict(old_columns)
self.partner._columns.update({
'related_company_partner_id': fields.related('company_id', 'partner_id', type='many2one', obj='res.partner'),
})
# find a company with a non-null partner_id
company_ids = self.company.search(self.cr, self.uid, [('partner_id', '!=', False)], limit=1)
company = self.company.browse(self.cr, self.uid, company_ids[0])
# find partners that satisfy [('partner_id.company_id', '=', company.id)]
partner_ids = self.partner.search(self.cr, self.uid, [('related_company_partner_id', '=', company.id)])
partner = self.partner.browse(self.cr, self.uid, partner_ids[0])
# create a new partner, and assign it to company
new_partner_id = self.partner.create(self.cr, self.uid, {'name': 'Foo'})
partner.write({'related_company_partner_id': new_partner_id})
company = self.company.browse(self.cr, self.uid, company_ids[0])
self.assertEqual(company.partner_id.id, new_partner_id)
partner = self.partner.browse(self.cr, self.uid, partner_ids[0])
self.assertEqual(partner.related_company_partner_id.id, new_partner_id)
# restore res.partner fields
self.partner._columns = old_columns
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223249-1/0622W17 | static/Brython3.1.1-20150328-091302/Lib/string.py | 734 | 9410 | """A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
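# For example (illustrative, not part of the original module):
#   capwords(' hello   world ') == 'Hello World'
#   capwords('hello-world', '-') == 'Hello-World'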
####################################################################
import re as _re
from collections import ChainMap
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
class Template(metaclass=_TemplateMetaclass):
"""A string class for supporting $-substitutions."""
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
flags = _re.IGNORECASE
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(keepends=True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named') or mo.group('braced')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return mo.group()
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return mo.group()
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
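# Illustrative examples (not part of the original module):
#   Template('$who likes $what').substitute(who='tim', what='kung pao')
# returns 'tim likes kung pao', while
#   Template('$who owes $$5').safe_substitute()
# returns '$who owes $5' -- the unknown $who is left intact and the $$
# escape collapses to a single delimiter.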
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
def format(self, format_string, *args, **kwargs):
return self.vformat(format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
used_args = set()
result = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec = self._vformat(format_spec, args, kwargs,
used_args, recursion_depth-1)
# format the object and append to the result
result.append(self.format_field(obj, format_spec))
return ''.join(result)
def get_value(self, key, args, kwargs):
if isinstance(key, int):
return args[key]
else:
return kwargs[key]
def check_unused_args(self, used_args, args, kwargs):
pass
def format_field(self, value, format_spec):
return format(value, format_spec)
def convert_field(self, value, conversion):
# do any conversion on the resulting object
if conversion is None:
return value
elif conversion == 's':
return str(value)
elif conversion == 'r':
return repr(value)
elif conversion == 'a':
return ascii(value)
raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
# literal_text can be zero length
# field_name can be None, in which case there's no
# object to format and output
# if field_name is not None, it is looked up, formatted
# with format_spec and conversion and then used
def parse(self, format_string):
return _string.formatter_parser(format_string)
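    # For example (illustrative, not part of the original module):
    #   list(Formatter().parse('{0}-{a!r:>5}x')) ==
    #   [('', '0', '', None), ('-', 'a', '>5', 'r'), ('x', None, None, None)]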
# given a field_name, find the object it references.
# field_name: the field being looked up, e.g. "0.name"
# or "lookup[3]"
# used_args: a set of which args have been used
# args, kwargs: as passed in to vformat
def get_field(self, field_name, args, kwargs):
first, rest = _string.formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
# loop through the rest of the field_name, doing
# getattr or getitem as needed
for is_attr, i in rest:
if is_attr:
obj = getattr(obj, i)
else:
obj = obj[i]
return obj, first
| gpl-3.0 |
morreene/tradenews | venv/Lib/site-packages/sqlalchemy/ext/compiler.py | 36 | 16257 | # ext/compiler.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an API for creation of custom ClauseElements and compilers.
Synopsis
========
Usage involves the creation of one or more
:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or
more callables defining its compilation::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ColumnClause
class MyColumn(ColumnClause):
pass
@compiles(MyColumn)
def compile_mycolumn(element, compiler, **kw):
return "[%s]" % element.name
Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`,
the base expression element for named column objects. The ``compiles``
decorator registers itself with the ``MyColumn`` class so that it is invoked
when the object is compiled to a string::
from sqlalchemy import select
s = select([MyColumn('x'), MyColumn('y')])
print str(s)
Produces::
SELECT [x], [y]
Dialect-specific compilation rules
==================================
Compilers can also be made dialect-specific. The appropriate compiler will be
invoked for the dialect in use::
from sqlalchemy.schema import DDLElement
class AlterColumn(DDLElement):
def __init__(self, column, cmd):
self.column = column
self.cmd = cmd
@compiles(AlterColumn)
def visit_alter_column(element, compiler, **kw):
return "ALTER COLUMN %s ..." % element.column.name
@compiles(AlterColumn, 'postgresql')
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name,
element.column.name)
The second ``visit_alter_column`` will be invoked when any ``postgresql``
dialect is used.
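For instance (an illustrative sketch, not from the original docs;
``mytable`` is assumed to be a ``Table`` with a column ``x``)::
    from sqlalchemy.dialects import postgresql
    alter = AlterColumn(mytable.c.x, "...")
    print alter                                         # generic rule
    print alter.compile(dialect=postgresql.dialect())   # postgresql rule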
Compiling sub-elements of a custom expression construct
=======================================================
The ``compiler`` argument is the
:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object
can be inspected for any information about the in-progress compilation,
including ``compiler.dialect``, ``compiler.statement`` etc. The
:class:`~sqlalchemy.sql.compiler.SQLCompiler` and
:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()``
method which can be used for compilation of embedded attributes::
from sqlalchemy.sql.expression import Executable, ClauseElement
class InsertFromSelect(Executable, ClauseElement):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s (%s)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select)
)
insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5))
print insert
Produces::
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z
FROM mytable WHERE mytable.x > :x_1)"
.. note::
The above ``InsertFromSelect`` construct is only an example, this actual
functionality is already available using the
:meth:`.Insert.from_select` method.
.. note::
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
enabled. See :ref:`enabling_compiled_autocommit` for this step.
Cross Compiling between SQL and DDL compilers
---------------------------------------------
SQL and DDL constructs are each compiled using different base compilers -
``SQLCompiler`` and ``DDLCompiler``. A common need is to access the
compilation rules of SQL expressions from within a DDL expression. The
``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as
below where we generate a CHECK constraint that embeds a SQL expression::
@compiles(MyConstraint)
def compile_my_constraint(constraint, ddlcompiler, **kw):
return "CONSTRAINT %s CHECK (%s)" % (
constraint.name,
ddlcompiler.sql_compiler.process(
constraint.expression, literal_binds=True)
)
Above, we add an additional flag to the process step as called by
:meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This
indicates that any SQL expression which refers to a :class:`.BindParameter`
object or other "literal" object such as those which refer to strings or
integers should be rendered **in-place**, rather than being referred to as
a bound parameter; when emitting DDL, bound parameters are typically not
supported.
.. _enabling_compiled_autocommit:
Enabling Autocommit on a Construct
==================================
Recall from the section :ref:`autocommit` that the :class:`.Engine`, when
asked to execute a construct in the absence of a user-defined transaction,
detects if the given construct represents DML or DDL, that is, a data
modification or data definition statement, which requires (or may require,
in the case of DDL) that the transaction generated by the DBAPI be committed
(recall that DBAPI always has a transaction going on regardless of what
SQLAlchemy does). Checking for this is actually accomplished by checking for
the "autocommit" execution option on the construct. When building a
construct like an INSERT derivation, a new DDL type, or perhaps a stored
procedure that alters data, the "autocommit" option needs to be set in order
for the statement to function with "connectionless" execution
(as described in :ref:`dbengine_implicit`).
Currently a quick way to do this is to subclass :class:`.Executable`, then
add the "autocommit" flag to the ``_execution_options`` dictionary (note this
is a "frozen" dictionary which supplies a generative ``union()`` method)::
from sqlalchemy.sql.expression import Executable, ClauseElement
class MyInsertThing(Executable, ClauseElement):
_execution_options = \\
Executable._execution_options.union({'autocommit': True})
More succinctly, if the construct is truly similar to an INSERT, UPDATE, or
DELETE, :class:`.UpdateBase` can be used, which already is a subclass
of :class:`.Executable`, :class:`.ClauseElement` and includes the
``autocommit`` flag::
from sqlalchemy.sql.expression import UpdateBase
class MyInsertThing(UpdateBase):
def __init__(self, ...):
...
DDL elements that subclass :class:`.DDLElement` already have the
"autocommit" flag turned on.
Changing the default compilation of existing constructs
=======================================================
The compiler extension applies just as well to the existing constructs. When
overriding the compilation of a built in SQL construct, the @compiles
decorator is invoked upon the appropriate class (be sure to use the class,
i.e. ``Insert`` or ``Select``, instead of the creation function such
as ``insert()`` or ``select()``).
Within the new compilation function, to get at the "original" compilation
routine, use the appropriate visit_XXX method - this is
because compiler.process() will call upon the overriding routine and cause
an endless loop. For example, to add a "prefix" to all insert statements::
from sqlalchemy.sql.expression import Insert
@compiles(Insert)
def prefix_inserts(insert, compiler, **kw):
return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)
The above compiler will prefix all INSERT statements with "some prefix" when
compiled.
.. _type_compilation_extension:
Changing Compilation of Types
=============================
``compiler`` works for types, too, such as below where we implement the
MS-SQL specific 'max' keyword for ``String``/``VARCHAR``::
@compiles(String, 'mssql')
@compiles(VARCHAR, 'mssql')
def compile_varchar(element, compiler, **kw):
if element.length == 'max':
return "VARCHAR('max')"
else:
return compiler.visit_VARCHAR(element, **kw)
foo = Table('foo', metadata,
Column('data', VARCHAR('max'))
)
Subclassing Guidelines
======================
A big part of using the compiler extension is subclassing SQLAlchemy
expression constructs. To make this easier, the expression and
schema packages feature a set of "bases" intended for common tasks.
A synopsis is as follows:
* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root
expression class. Any SQL expression can be derived from this base, and is
probably the best choice for longer constructs such as specialized INSERT
statements.
* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all
"column-like" elements. Anything that you'd place in the "columns" clause of
a SELECT statement (as well as order by and group by) can derive from this -
the object will automatically have Python "comparison" behavior.
:class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a
``type`` member which is the expression's return type. This can be established
at the instance level in the constructor, or at the class level if it's
generally constant::
class timestamp(ColumnElement):
type = TIMESTAMP()
* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a
``ColumnElement`` and a "from clause" like object, and represents a SQL
function or stored procedure type of call. Since most databases support
statements along the lines of "SELECT FROM <some function>",
``FunctionElement`` adds in the ability to be used in the FROM clause of a
``select()`` construct::
from sqlalchemy.sql.expression import FunctionElement
class coalesce(FunctionElement):
name = 'coalesce'
@compiles(coalesce)
def compile(element, compiler, **kw):
return "coalesce(%s)" % compiler.process(element.clauses)
@compiles(coalesce, 'oracle')
def compile(element, compiler, **kw):
if len(element.clauses) > 2:
raise TypeError("coalesce only supports two arguments on Oracle")
return "nvl(%s)" % compiler.process(element.clauses)
* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the
``execute_at()`` method, allowing the construct to be invoked during CREATE
TABLE and DROP TABLE sequences.
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which
should be used with any expression class that represents a "standalone"
SQL statement that can be passed directly to an ``execute()`` method. It
is already implicit within ``DDLElement`` and ``FunctionElement``. A combined sketch using these bases follows this list.
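Below is a combined sketch (hedged; the ``InsertIntoLog`` construct and its
``log`` table are invented for illustration) showing a standalone statement
built from ``Executable`` and ``ClauseElement`` with its own compiler::
    from sqlalchemy import literal
    from sqlalchemy.sql.expression import ClauseElement, Executable
    from sqlalchemy.ext.compiler import compiles
    class InsertIntoLog(Executable, ClauseElement):
        def __init__(self, message):
            self.message = message
    @compiles(InsertIntoLog)
    def visit_insert_into_log(element, compiler, **kw):
        # render the message as a bound parameter via literal()
        return "INSERT INTO log (message) VALUES (%s)" % (
            compiler.process(literal(element.message)))
Per the autocommit section above, "connectionless" execution of such a
construct would additionally want the ``autocommit`` execution option set.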
Further Examples
================
"UTC timestamp" function
-------------------------
A function that works like "CURRENT_TIMESTAMP" except it applies the
appropriate conversions so that the time is in UTC time. Timestamps are best
stored in relational databases as UTC, without time zones: UTC so that your
database doesn't think time has gone backwards in the hour when daylight
saving time ends, and without time zones because time zones are like character
encodings - they're best applied only at the endpoints of an application
(i.e. convert to UTC upon user input, re-apply the desired time zone upon display).
For Postgresql and Microsoft SQL Server::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def ms_utcnow(element, compiler, **kw):
return "GETUTCDATE()"
Example usage::
from sqlalchemy import (
Table, Column, Integer, String, DateTime, MetaData
)
metadata = MetaData()
event = Table("event", metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50), nullable=False),
Column("timestamp", DateTime, server_default=utcnow())
)
"GREATEST" function
-------------------
The "GREATEST" function is given any number of arguments and returns the one
that is of the highest value - its equivalent to Python's ``max``
function. A SQL standard version versus a CASE based version which only
accommodates two arguments::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'sqlite')
@compiles(greatest, 'mssql')
@compiles(greatest, 'oracle')
def case_greatest(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return "CASE WHEN %s > %s THEN %s ELSE %s END" % (
compiler.process(arg1),
compiler.process(arg2),
compiler.process(arg1),
compiler.process(arg2),
)
Example usage::
Session.query(Account).\\
filter(
greatest(
Account.checking_balance,
Account.savings_balance) > 10000
)
"false" expression
------------------
Render a "false" constant expression, rendering as "0" on platforms that
don't have a "false" constant::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
class sql_false(expression.ColumnElement):
pass
@compiles(sql_false)
def default_false(element, compiler, **kw):
return "false"
@compiles(sql_false, 'mssql')
@compiles(sql_false, 'mysql')
@compiles(sql_false, 'oracle')
def int_false(element, compiler, **kw):
return "0"
Example usage::
from sqlalchemy import select, union_all
exp = union_all(
select([users.c.name, sql_false().label("enrolled")]),
select([customers.c.name, customers.c.enrolled])
)
"""
from .. import exc
from ..sql import visitors
def compiles(class_, *specs):
"""Register a function as a compiler for a
given :class:`.ClauseElement` type."""
def decorate(fn):
existing = class_.__dict__.get('_compiler_dispatcher', None)
existing_dispatch = class_.__dict__.get('_compiler_dispatch')
if not existing:
existing = _dispatcher()
if existing_dispatch:
existing.specs['default'] = existing_dispatch
# TODO: why is the lambda needed ?
setattr(class_, '_compiler_dispatch',
lambda *arg, **kw: existing(*arg, **kw))
setattr(class_, '_compiler_dispatcher', existing)
if specs:
for s in specs:
existing.specs[s] = fn
else:
existing.specs['default'] = fn
return fn
return decorate
def deregister(class_):
"""Remove all custom compilers associated with a given
:class:`.ClauseElement` type."""
if hasattr(class_, '_compiler_dispatcher'):
# regenerate default _compiler_dispatch
visitors._generate_dispatch(class_)
# remove custom directive
del class_._compiler_dispatcher
class _dispatcher(object):
def __init__(self):
self.specs = {}
def __call__(self, element, compiler, **kw):
# TODO: yes, this could also switch off of DBAPI in use.
fn = self.specs.get(compiler.dialect.name, None)
if not fn:
try:
fn = self.specs['default']
except KeyError:
raise exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element))
return fn(element, compiler, **kw)
| bsd-3-clause |
fragglet/omgifol | lump.py | 1 | 11026 | # Import the Python Imaging Library if it is available. On error, ignore
# the problem and continue. PIL being absent should only affect the
# graphic lump loading/saving methods and the user may not be interested
# in installing PIL just to pass this line if not interested in using the
# graphics functionality at all.
try:
import Image, ImageDraw, ImageOps
except ImportError:
pass
import os
import omg.palette
from omg.util import *
class Lump(object):
"""Basic lump class. Instances of Lump (and its subclasses)
always have the following:
.data -- a string holding the lump's data
.from_file -- load the data from a file
.to_file -- save the data to a file
The default Lump class merely copies the raw data when
loading/saving to files, but subclasses may convert data
appropriately (for example, Graphic supports various image
formats)."""
def __init__(self, data=None, from_file=None):
"""Create a new instance. The `data` parameter may be a string
representing data for the lump. The `from_file` parameter may be
a path to a file or a file-like object to load from."""
self.data = ""
if issubclass(type(data), Lump):
self.data = data.data
elif data is not None:
self.data = data or ""
if from_file:
self.from_file(from_file)
def from_file(self, source):
"""Load data from a file. Source may be a path name string
or a file-like object (with a `read` method)."""
self.data = readfile(source)
def to_file(self, target):
"""Write data to a file. Target may be a path name string
or a file-like object (with a `write` method)."""
writefile(target, self.data)
def copy(self):
return deepcopy(self)
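# A minimal usage sketch (hedged; assumes some lump file "DEMO.lmp" exists):
#
#     lump = Lump(from_file="DEMO.lmp")    # load raw bytes from disk
#     print len(lump.data)                 # .data is a plain byte string
#     lump.to_file("DEMO_copy.lmp")        # write the bytes back out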
class Music(Lump):
"""Subclass of Lump, for music lumps. Not yet implemented."""
pass
class Sound(Lump):
"""Subclass of Lump, for sound lumps. Not yet implemented."""
pass
class Graphic(Lump):
"""Subclass of Lump, for Doom format graphics. Supports
conversion from/to RAWs (sequences of bytes) and PIL
Image objects, as well as saving to/loading from various
file formats (via PIL).
Useful attributes:
.dimensions -- (width, height)
.width -- width of the image
.height -- height of the image
.x_offset -- x offset
.y_offset -- y offset
"""
def __init__(self, data=None, from_file=None, palette=None):
self.palette = palette or omg.palette.default
Lump.__init__(self, data, from_file)
def get_offsets(self):
"""Retrieve the (x, y) offsets of the graphic."""
return unpack('hh', self.data[4:8])
def set_offsets(self, xy):
"""Set the (x, y) offsets of the graphic."""
self.data = self.data[0:4] + pack('hh', *xy) + self.data[8:]
def get_dimensions(self):
"""Retrieve the (width, height) dimensions of the graphic."""
return unpack('hh', self.data[0:4])
offsets = property(get_offsets, set_offsets)
x_offset = property(lambda self: self.offsets[0],
lambda self, x: self.set_offsets((x, self.y_offset)))
y_offset = property(lambda self: self.offsets[1],
lambda self, y: self.set_offsets((self.x_offset, y)))
dimensions = property(get_dimensions)
width = property(lambda self: self.dimensions[0])
height = property(lambda self: self.dimensions[1])
def from_raw(self, data, width, height, x_offset=0, y_offset=0, pal=None):
"""Load a raw 8-bpp image, converting to the Doom picture format
(used by all graphics except flats)"""
pal = pal or omg.palette.default
trans = chr(pal.tran_index)
# First pass: extract pixel data in column+post format
columns_in = [data[n:width*height:width] for n in range(width)]
columns_out = []
for column in columns_in:
# Split into chunks of continuous non-transparent pixels
postdata = filter(None, column.split(trans))
# Find the y position where each chunk starts
start_rows = []
in_trans = True
for y in range(height):
if column[y] == trans:
in_trans = True
elif in_trans:
start_rows.append(y)
in_trans = False
columns_out.append(zip(start_rows, postdata))
# Second pass: compile column+post data, adding pointers
data = []
columnptrs = []
pointer = 4*width + 8
for column in columns_out:
columnptrs.append(pack('l', pointer))
for row, pixels in column:
data.append("%c%c\x00%s\x00" % (row, len(pixels), pixels))
pointer += 4 + len(pixels)
data.append('\xff')
pointer += 1
# Merge everything together
self.data = ''.join([pack('4h', width, height, x_offset, y_offset),
''.join(columnptrs), ''.join(data)])
def to_raw(self, tran_index=None):
"""Returns self converted to a raw (8-bpp) image.
`tran_index` specifies the palette index to use for
transparent pixels. The value defaults to that of the
Graphic object's palette instance."""
data = self.data
width, height = self.dimensions
tran_index = tran_index or self.palette.tran_index
output = [chr(tran_index)] * (width*height)
pointers = unpack('%il'%width, data[8 : 8 + width*4])
for x in xrange(width):
pointer = pointers[x]
while data[pointer] != '\xff':
post_length = ord(data[pointer+1])
op = ord(data[pointer])*width + x
for p in range(pointer + 3, pointer + post_length + 3):
output[op] = data[p]
op += width
pointer += post_length + 4
return join(output)
def to_Image(self):
"""Convert to a PIL Image instance"""
im = Image.new('P', self.dimensions, None)
if isinstance(self, Flat):
im.fromstring(self.data)
else:
im.fromstring(self.to_raw())
im.putpalette(self.palette.save_bytes)
return im
def from_Image(self, im, translate=False):
"""Load from a PIL Image instance
If the input image is 24-bit, the colors will be looked up
in the current palette.
If the input image is 8-bit, indices will simply be copied
from the input image. To properly translate colors between
palettes, set the `translate` parameter."""
pixels = im.tostring()
width, height = im.size
# High resolution graphics not supported yet, so truncate
height = min(254, height)
xoff, yoff = (width // 2)-1, height-5
if im.mode == "RGB":
pixels = join([chr(self.palette.match(unpack('BBB', \
pixels[i*3:(i+1)*3]))) for i in range(width*height)])
elif im.mode == 'P':
srcpal = im.palette.tostring()
if translate:
R = [ord(c) for c in srcpal[0::3]]
G = [ord(c) for c in srcpal[1::3]]
B = [ord(c) for c in srcpal[2::3]]
# Work around PIL bug: "RGB" loads as "BGR" from bmps (?)
if getattr(im, 'format', None) == 'BMP':
srcpal = zip(B, G, R)
else:
srcpal = zip(R, G, B)
lexicon = [chr(self.palette.match(c)) for c in srcpal]
pixels = join([lexicon[ord(b)] for b in pixels])
else:
# Simply copy pixels. However, make sure to translate
# all colors matching the transparency color to the
# right index. This is necessary because programs
# aren't consistent in choice of position for the
# transparent entry.
packed_color = pack("BBB", *pal.tran_color)
ri = 0
while ri != -1:
ri = srcpal.find(packed_color, ri+3)
if not ri % 3 and ri//3 != self.palette.tran_index:
pixels = pixels.replace(chr(ri//3),
chr(self.palette.tran_index))
else:
raise TypeError, "image mode must be 'P' or 'RGB'"
self.from_raw(pixels, width, height, xoff, yoff, self.palette)
def from_file(self, filename, translate=False):
"""Load graphic from an image file."""
if filename[-4:].lower() == '.lmp':
self.data = readfile(filename)
else:
im = Image.open(filename)
self.from_Image(im, translate)
def to_file(self, filename, mode='P'):
"""Save the graphic to an image file.
The output format is selected based on the filename extension.
For example, "file.jpg" saves to JPEG format. If the file has
no extension, PNG format is used.
Special cases: ".lmp" saves the raw lump data, and ".raw" saves
the raw pixel data.
`mode` may be 'P' or 'RGB' for palette or 24 bit output,
respectively. However, .raw ignores this parameter and always
writes in palette mode."""
format = os.path.splitext(filename)[1][1:].upper()
if format == 'LMP': writefile(filename, self.data)
elif format == 'RAW': writefile(filename, self.to_raw())
else:
im = self.to_Image()
om = im.convert(mode)
if format:
om.save(filename)
else:
om.save(filename, "PNG")
def translate(self, pal):
"""Translate (in-place) the graphic to another palette."""
lexicon = [chr(pal.match(self.palette.colors[i])) for i in range(256)]
lexicon[self.palette.tran_index] = chr(pal.tran_index)
if isinstance(self, Flat):
self.data = join([lexicon[ord(b)] for b in self.data])
else:
raw = self.to_raw()
#raw = raw.replace(chr(self.palette.tran_index), chr(pal.tran_index))
self.from_raw(join([lexicon[ord(b)] for b in raw]),
    self.width, self.height,
    self.x_offset, self.y_offset, pal)
class Flat(Graphic):
"""Subclass of Graphic, for flat graphics"""
def get_dimensions(self):
sz = len(self.data)
if sz == 4096: return (64, 64)
if sz == 4160: return (64, 65)
root = int(sz**0.5)
if root**2 != sz:
raise TypeError, "unable to determine size: not a square number"
return (root, root)
dimensions = property(get_dimensions)
width = property(lambda self: self.dimensions[0])
height = property(lambda self: self.dimensions[1])
def load_raw(self, data, *unused):
self.data = data
def to_raw(self):
return self.data
| mit |
shengzhou/onie | test/lib/test_utils.py | 6 | 1110 | #
# Collection of useful test utility methods
#
# Copyright (C) 2013 Curt Brune <curt@cumulusnetworks.com>
#
# SPDX-License-Identifier: GPL-2.0
#-------------------------------------------------------------------------------
#
# Imports
#
try:
import sys
import os
import re
import io
import logging
import subprocess
except ImportError, e:
raise ImportError (str(e) + "- required module not found")
#-------------------------------------------------------------------------------
#
# Functions
#
def exec_command(cmd):
'''
Helper routine for running external shell commands.
'''
retval = 0
try:
logging.debug("Executing: " + cmd)
output = subprocess.check_output(cmd,
shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
retval = e.returncode
output = e.output
logging.warning("Shell command failed: " + cmd)
logging.warning("Failed command output: " + output)
return (retval, output)
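# A minimal usage sketch (hedged; the command itself is illustrative):
#
#     (rc, out) = exec_command("ls /tmp")
#     if rc != 0:
#         logging.error("listing failed: " + out)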
| gpl-2.0 |
gauravbose/digital-menu | django/core/management/commands/loaddata.py | 77 | 12783 | from __future__ import unicode_literals
import glob
import gzip
import os
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text
from django.utils.functional import cached_property
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = ("No database fixture specified. Please provide the "
"path of at least one fixture in the command line.")
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+',
help='Fixture labels.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
'fixtures into. Defaults to the "default" database.')
parser.add_argument('--app', action='store', dest='app_label',
default=None, help='Only look for fixtures in the specified app.')
parser.add_argument('--ignorenonexistent', '-i', action='store_true',
dest='ignore', default=False,
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.')
def handle(self, *fixture_labels, **options):
self.ignore = options.get('ignore')
self.using = options.get('database')
self.app_label = options.get('app_label')
self.hide_empty = options.get('hide_empty', False)
self.verbosity = options.get('verbosity')
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
with connection.constraint_checks_disabled():
for fixture_label in fixture_labels:
self.load_label(fixture_label)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_count == 0 and self.hide_empty:
pass
elif self.fixture_object_count == self.loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_count))
else:
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_object_count, self.fixture_count))
def load_label(self, fixture_label):
"""
Loads fixture files for a given label.
"""
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write("Installing %s fixture '%s' from %s." %
(ser_fmt, fixture_name, humanize(fixture_dir)))
objects = serializers.deserialize(ser_fmt, fixture,
using=self.using, ignorenonexistent=self.ignore)
for obj in objects:
objects_in_fixture += 1
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
except (DatabaseError, IntegrityError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': force_text(e)
},)
raise
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@lru_cache.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""
Finds fixture files for a given label.
"""
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats.keys()) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = ('.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts))
targets = set('.'.join((fixture_name, suffix)) for suffix in suffixes)
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
for candidate in glob.iglob(os.path.join(fixture_dir, fixture_name + '*')):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if fixture_name != 'initial_data' and not fixture_files:
# Warning kept for backwards-compatibility; why not an exception?
warnings.warn("No fixture named '%s' found." % fixture_name)
elif fixture_name == 'initial_data' and fixture_files:
warnings.warn(
'initial_data fixtures are deprecated. Use data migrations instead.',
RemovedInDjango19Warning
)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(list(fixture_dirs))
dirs.append('')
dirs = [upath(os.path.abspath(os.path.realpath(d))) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
Splits a fixture name into name, serialization format, and compression format.
"""
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % (''.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path'
| bsd-3-clause |
kreatorkodi/repository.torrentbr | plugin.video.yatp/site-packages/hachoir_parser/archive/zlib.py | 74 | 13744 | """Detailed ZLIB parser
Author: Robert Xiao
Creation date: July 9 2007
"""
from hachoir_parser import Parser
from hachoir_core.field import (Bit, Bits, Field, Int16, UInt32,
Enum, FieldSet, GenericFieldSet,
PaddingBits, ParserError, RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.tools import paddingSize, alignValue
def extend_data(data, length, offset):
"""Extend data using a length and an offset."""
if length >= offset:
new_data = data[-offset:] * (alignValue(length, offset) // offset)
return data + new_data[:length]
else:
return data + data[-offset:-offset+length]
def build_tree(lengths):
"""Build a Huffman tree from a list of lengths.
The ith entry of the input list is the length of the Huffman code corresponding to
integer i, or 0 if the integer i is unused."""
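# Illustrative example: build_tree([2, 1, 3, 3]) assigns the canonical codes
# 1->"0", 0->"10", 2->"110", 3->"111", i.e. it returns
# {(2, 2): 0, (1, 0): 1, (3, 6): 2, (3, 7): 3}, keyed by (length, code).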
max_length = max(lengths) + 1
bit_counts = [0]*max_length
next_code = [0]*max_length
tree = {}
for i in lengths:
if i:
bit_counts[i] += 1
code = 0
for i in xrange(1, len(bit_counts)):
next_code[i] = code = (code + bit_counts[i-1]) << 1
for i, ln in enumerate(lengths):
if ln:
tree[(ln, next_code[ln])] = i
next_code[ln] += 1
return tree
class HuffmanCode(Field):
"""Huffman code. Uses tree parameter as the Huffman tree."""
def __init__(self, parent, name, tree, description=None):
Field.__init__(self, parent, name, 0, description)
endian = self.parent.endian
stream = self.parent.stream
addr = self.absolute_address
value = 0
while (self.size, value) not in tree:
if self.size > 256:
raise ParserError("Huffman code too long!")
bit = stream.readBits(addr, 1, endian)
value <<= 1
value += bit
self._size += 1
addr += 1
self.huffvalue = value
self.realvalue = tree[(self.size, value)]
def createValue(self):
return self.huffvalue
class DeflateBlock(FieldSet):
# code: (min, max, extrabits)
LENGTH_SYMBOLS = {257:(3,3,0),
258:(4,4,0),
259:(5,5,0),
260:(6,6,0),
261:(7,7,0),
262:(8,8,0),
263:(9,9,0),
264:(10,10,0),
265:(11,12,1),
266:(13,14,1),
267:(15,16,1),
268:(17,18,1),
269:(19,22,2),
270:(23,26,2),
271:(27,30,2),
272:(31,34,2),
273:(35,42,3),
274:(43,50,3),
275:(51,58,3),
276:(59,66,3),
277:(67,82,4),
278:(83,98,4),
279:(99,114,4),
280:(115,130,4),
281:(131,162,5),
282:(163,194,5),
283:(195,226,5),
284:(227,257,5),
285:(258,258,0)
}
DISTANCE_SYMBOLS = {0:(1,1,0),
1:(2,2,0),
2:(3,3,0),
3:(4,4,0),
4:(5,6,1),
5:(7,8,1),
6:(9,12,2),
7:(13,16,2),
8:(17,24,3),
9:(25,32,3),
10:(33,48,4),
11:(49,64,4),
12:(65,96,5),
13:(97,128,5),
14:(129,192,6),
15:(193,256,6),
16:(257,384,7),
17:(385,512,7),
18:(513,768,8),
19:(769,1024,8),
20:(1025,1536,9),
21:(1537,2048,9),
22:(2049,3072,10),
23:(3073,4096,10),
24:(4097,6144,11),
25:(6145,8192,11),
26:(8193,12288,12),
27:(12289,16384,12),
28:(16385,24576,13),
29:(24577,32768,13),
}
CODE_LENGTH_ORDER = [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]
def __init__(self, parent, name, uncomp_data="", *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.uncomp_data = uncomp_data
def createFields(self):
yield Bit(self, "final", "Is this the final block?") # BFINAL
yield Enum(Bits(self, "compression_type", 2), # BTYPE
{0:"None", 1:"Fixed Huffman", 2:"Dynamic Huffman", 3:"Reserved"})
if self["compression_type"].value == 0: # no compression
padding = paddingSize(self.current_size + self.absolute_address, 8) # align on byte boundary
if padding:
yield PaddingBits(self, "padding[]", padding)
yield Int16(self, "len")
yield Int16(self, "nlen", "One's complement of len")
if self["len"].value != ~self["nlen"].value:
raise ParserError("len must be equal to the one's complement of nlen!")
if self["len"].value: # null stored blocks produced by some encoders (e.g. PIL)
yield RawBytes(self, "data", self["len"].value, "Uncompressed data")
return
elif self["compression_type"].value == 1: # Fixed Huffman
length_tree = {} # (size, huffman code): value
distance_tree = {}
for i in xrange(144):
length_tree[(8, i+48)] = i
for i in xrange(144, 256):
length_tree[(9, i+256)] = i
for i in xrange(256, 280):
length_tree[(7, i-256)] = i
for i in xrange(280, 288):
length_tree[(8, i-88)] = i
for i in xrange(32):
distance_tree[(5, i)] = i
elif self["compression_type"].value == 2: # Dynamic Huffman
yield Bits(self, "huff_num_length_codes", 5, "Number of Literal/Length Codes, minus 257")
yield Bits(self, "huff_num_distance_codes", 5, "Number of Distance Codes, minus 1")
yield Bits(self, "huff_num_code_length_codes", 4, "Number of Code Length Codes, minus 4")
code_length_code_lengths = [0]*19 # confusing variable name...
for i in self.CODE_LENGTH_ORDER[:self["huff_num_code_length_codes"].value+4]:
field = Bits(self, "huff_code_length_code[%i]" % i, 3, "Code lengths for the code length alphabet")
yield field
code_length_code_lengths[i] = field.value
code_length_tree = build_tree(code_length_code_lengths)
length_code_lengths = []
distance_code_lengths = []
for numcodes, name, lengths in (
(self["huff_num_length_codes"].value + 257, "length", length_code_lengths),
(self["huff_num_distance_codes"].value + 1, "distance", distance_code_lengths)):
while len(lengths) < numcodes:
field = HuffmanCode(self, "huff_%s_code[]" % name, code_length_tree)
value = field.realvalue
if value < 16:
prev_value = value
field._description = "Literal Code Length %i (Huffman Code %i)" % (value, field.value)
yield field
lengths.append(value)
else:
info = {16: (3,6,2),
17: (3,10,3),
18: (11,138,7)}[value]
if value == 16:
repvalue = prev_value
else:
repvalue = 0
field._description = "Repeat Code %i, Repeating value (%i) %i to %i times (Huffman Code %i)" % (value, repvalue, info[0], info[1], field.value)
yield field
extrafield = Bits(self, "huff_%s_code_extra[%s" % (name, field.name.split('[')[1]), info[2])
num_repeats = extrafield.value+info[0]
extrafield._description = "Repeat Extra Bits (%i), total repeats %i"%(extrafield.value, num_repeats)
yield extrafield
lengths += [repvalue]*num_repeats
length_tree = build_tree(length_code_lengths)
distance_tree = build_tree(distance_code_lengths)
else:
raise ParserError("Unsupported compression type 3!")
while True:
field = HuffmanCode(self, "length_code[]", length_tree)
value = field.realvalue
if value < 256:
field._description = "Literal Code %r (Huffman Code %i)" % (chr(value), field.value)
yield field
self.uncomp_data += chr(value)
if value == 256:
field._description = "Block Terminator Code (256) (Huffman Code %i)" % field.value
yield field
break
elif value > 256:
info = self.LENGTH_SYMBOLS[value]
if info[2] == 0:
field._description = "Length Code %i, Value %i (Huffman Code %i)" % (value, info[0], field.value)
length = info[0]
yield field
else:
field._description = "Length Code %i, Values %i to %i (Huffman Code %i)" % (value, info[0], info[1], field.value)
yield field
extrafield = Bits(self, "length_extra[%s" % field.name.split('[')[1], info[2])
length = extrafield.value + info[0]
extrafield._description = "Length Extra Bits (%i), total length %i"%(extrafield.value, length)
yield extrafield
field = HuffmanCode(self, "distance_code[]", distance_tree)
value = field.realvalue
info = self.DISTANCE_SYMBOLS[value]
if info[2] == 0:
field._description = "Distance Code %i, Value %i (Huffman Code %i)" % (value, info[0], field.value)
distance = info[0]
yield field
else:
field._description = "Distance Code %i, Values %i to %i (Huffman Code %i)" % (value, info[0], info[1], field.value)
yield field
extrafield = Bits(self, "distance_extra[%s" % field.name.split('[')[1], info[2])
distance = extrafield.value + info[0]
extrafield._description = "Distance Extra Bits (%i), total length %i"%(extrafield.value, distance)
yield extrafield
self.uncomp_data = extend_data(self.uncomp_data, length, distance)
class DeflateData(GenericFieldSet):
endian = LITTLE_ENDIAN
def createFields(self):
uncomp_data = ""
blk=DeflateBlock(self, "compressed_block[]", uncomp_data)
yield blk
uncomp_data = blk.uncomp_data
while not blk["final"].value:
blk=DeflateBlock(self, "compressed_block[]", uncomp_data)
yield blk
uncomp_data = blk.uncomp_data
padding = paddingSize(self.current_size + self.absolute_address, 8) # align on byte boundary
if padding:
yield PaddingBits(self, "padding[]", padding)
self.uncompressed_data = uncomp_data
class ZlibData(Parser):
PARSER_TAGS = {
"id": "zlib",
"category": "archive",
"file_ext": ("zlib",),
"min_size": 8*8,
"description": "ZLIB Data",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["compression_method"].value != 8:
return "Incorrect compression method"
if ((self["compression_info"].value << 12) +
(self["compression_method"].value << 8) +
(self["flag_compression_level"].value << 6) +
(self["flag_dictionary_present"].value << 5) +
(self["flag_check_bits"].value)) % 31 != 0:
return "Invalid flag check value"
return True
def createFields(self):
yield Enum(Bits(self, "compression_method", 4), {8:"deflate", 15:"reserved"}) # CM
yield Bits(self, "compression_info", 4, "base-2 log of the window size") # CINFO
yield Bits(self, "flag_check_bits", 5) # FCHECK
yield Bit(self, "flag_dictionary_present") # FDICT
yield Enum(Bits(self, "flag_compression_level", 2), # FLEVEL
{0:"Fastest", 1:"Fast", 2:"Default", 3:"Maximum, Slowest"})
if self["flag_dictionary_present"].value:
yield textHandler(UInt32(self, "dict_checksum", "ADLER32 checksum of dictionary information"), hexadecimal)
yield DeflateData(self, "data", self.stream, description = "Compressed Data")
yield textHandler(UInt32(self, "data_checksum", "ADLER32 checksum of compressed data"), hexadecimal)
def zlib_inflate(stream, wbits=None, prevdata=""):
if wbits is None or wbits >= 0:
return ZlibData(stream)["data"].uncompressed_data
else:
data = DeflateData(None, "root", stream, "", stream.askSize(None))
for unused in data:
pass
return data.uncompressed_data
| gpl-2.0 |
edgarli/proj8 | env/lib/python3.4/site-packages/setuptools/command/setopt.py | 458 | 5080 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind == 'local':
return 'setup.cfg'
if kind == 'global':
return os.path.join(
os.path.dirname(distutils.__file__), 'distutils.cfg'
)
if kind == 'user':
dot = os.name == 'posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
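# Illustrative results: config_file('local') -> 'setup.cfg'; on POSIX systems
# config_file('user') expands to '~/.pydistutils.cfg' (note the leading dot),
# while other platforms use '~/pydistutils.cfg'.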
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
from setuptools.compat import ConfigParser
log.debug("Reading configuration from %s", filename)
opts = ConfigParser.RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option, value in options.items():
if value is None:
log.debug(
"Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section, option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section, option, value)
log.info("Writing %s", filename)
if not dry_run:
with open(filename, 'w') as f:
opts.write(f)
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames) > 1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-', '_'): self.set_value}
},
self.dry_run
)
| artistic-2.0 |
joshua0pang/bazel | third_party/py/mock/tests/_testwith.py | 109 | 6077 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from __future__ import with_statement
from tests.support import unittest2, is_instance
from mock import MagicMock, Mock, patch, sentinel, mock_open, call
from tests.support_with import catch_warnings, nested
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest2.TestCase):
def test_with_statement(self):
with patch('tests._testwith.something', sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
self.assertEqual(something, sentinel.Something)
def test_with_statement_exception(self):
try:
with patch('tests._testwith.something', sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
raise Exception('pow')
except Exception:
pass
else:
self.fail("patch swallowed exception")
self.assertEqual(something, sentinel.Something)
def test_with_statement_as(self):
with patch('tests._testwith.something') as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertTrue(is_instance(mock_something, MagicMock),
"patching wrong type")
self.assertEqual(something, sentinel.Something)
def test_patch_object_with_statement(self):
class Foo(object):
something = 'foo'
original = Foo.something
with patch.object(Foo, 'something'):
self.assertNotEqual(Foo.something, original, "unpatched")
self.assertEqual(Foo.something, original)
def test_with_statement_nested(self):
with catch_warnings(record=True):
# nested is deprecated in Python 2.7
with nested(patch('tests._testwith.something'),
patch('tests._testwith.something_else')) as (mock_something, mock_something_else):
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_with_statement_specified(self):
with patch('tests._testwith.something', sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
self.assertEqual(something, sentinel.Something)
def testContextManagerMocking(self):
mock = Mock()
mock.__enter__ = Mock()
mock.__exit__ = Mock()
mock.__exit__.return_value = False
with mock as m:
self.assertEqual(m, mock.__enter__.return_value)
mock.__enter__.assert_called_with()
mock.__exit__.assert_called_with(None, None, None)
def test_context_manager_with_magic_mock(self):
mock = MagicMock()
with self.assertRaises(TypeError):
with mock:
'foo' + 3
mock.__enter__.assert_called_with()
self.assertTrue(mock.__exit__.called)
def test_with_statement_same_attribute(self):
with patch('tests._testwith.something', sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('tests._testwith.something') as mock_again:
self.assertEqual(something, mock_again, "unpatched")
self.assertEqual(something, mock_something,
"restored with wrong instance")
self.assertEqual(something, sentinel.Something, "not restored")
def test_with_statement_imbricated(self):
with patch('tests._testwith.something') as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('tests._testwith.something_else') as mock_something_else:
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_dict_context_manager(self):
foo = {}
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
self.assertEqual(foo, {})
with self.assertRaises(NameError):
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
raise NameError('Konrad')
self.assertEqual(foo, {})
class TestMockOpen(unittest2.TestCase):
def test_mock_open(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_mock_open_context_manager(self):
mock = mock_open()
handle = mock.return_value
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
expected_calls = [call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
self.assertIs(f, handle)
def test_explicit_mock(self):
mock = MagicMock()
mock_open(mock)
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_read_data(self):
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.read()
self.assertEqual(result, 'foo')
if __name__ == '__main__':
unittest2.main()
| apache-2.0 |
eonezhang/thrift | lib/py/src/protocol/TCompactProtocol.py | 106 | 10992 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from TProtocol import *
from struct import pack, unpack
__all__ = ['TCompactProtocol', 'TCompactProtocolFactory']
CLEAR = 0
FIELD_WRITE = 1
VALUE_WRITE = 2
CONTAINER_WRITE = 3
BOOL_WRITE = 4
FIELD_READ = 5
CONTAINER_READ = 6
VALUE_READ = 7
BOOL_READ = 8
def make_helper(v_from, container):
def helper(func):
def nested(self, *args, **kwargs):
assert self.state in (v_from, container), (self.state, v_from, container)
return func(self, *args, **kwargs)
return nested
return helper
writer = make_helper(VALUE_WRITE, CONTAINER_WRITE)
reader = make_helper(VALUE_READ, CONTAINER_READ)
def makeZigZag(n, bits):
checkIntegerLimits(n, bits)
return (n << 1) ^ (n >> (bits - 1))
def fromZigZag(n):
return (n >> 1) ^ -(n & 1)
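# Illustrative zigzag values: makeZigZag maps 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3,
# so small magnitudes stay small when varint-encoded; fromZigZag inverts this.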
def writeVarint(trans, n):
out = []
while True:
if n & ~0x7f == 0:
out.append(n)
break
else:
out.append((n & 0xff) | 0x80)
n = n >> 7
trans.write(''.join(map(chr, out)))
def readVarint(trans):
result = 0
shift = 0
while True:
x = trans.readAll(1)
byte = ord(x)
result |= (byte & 0x7f) << shift
if byte >> 7 == 0:
return result
shift += 7
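# Illustrative varint encoding: 300 (binary 100101100) is written low 7 bits
# first as the two bytes 0xAC 0x02, with the high bit set on all but the last.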
class CompactType:
STOP = 0x00
TRUE = 0x01
FALSE = 0x02
BYTE = 0x03
I16 = 0x04
I32 = 0x05
I64 = 0x06
DOUBLE = 0x07
BINARY = 0x08
LIST = 0x09
SET = 0x0A
MAP = 0x0B
STRUCT = 0x0C
CTYPES = {TType.STOP: CompactType.STOP,
TType.BOOL: CompactType.TRUE, # used for collection
TType.BYTE: CompactType.BYTE,
TType.I16: CompactType.I16,
TType.I32: CompactType.I32,
TType.I64: CompactType.I64,
TType.DOUBLE: CompactType.DOUBLE,
TType.STRING: CompactType.BINARY,
TType.STRUCT: CompactType.STRUCT,
TType.LIST: CompactType.LIST,
TType.SET: CompactType.SET,
TType.MAP: CompactType.MAP
}
TTYPES = {}
for k, v in CTYPES.items():
TTYPES[v] = k
TTYPES[CompactType.FALSE] = TType.BOOL
del k
del v
class TCompactProtocol(TProtocolBase):
"""Compact implementation of the Thrift protocol driver."""
PROTOCOL_ID = 0x82
VERSION = 1
VERSION_MASK = 0x1f
TYPE_MASK = 0xe0
TYPE_BITS = 0x07
TYPE_SHIFT_AMOUNT = 5
def __init__(self, trans):
TProtocolBase.__init__(self, trans)
self.state = CLEAR
self.__last_fid = 0
self.__bool_fid = None
self.__bool_value = None
self.__structs = []
self.__containers = []
def __writeVarint(self, n):
writeVarint(self.trans, n)
def writeMessageBegin(self, name, type, seqid):
assert self.state == CLEAR
self.__writeUByte(self.PROTOCOL_ID)
self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT))
self.__writeVarint(seqid)
self.__writeString(name)
self.state = VALUE_WRITE
def writeMessageEnd(self):
assert self.state == VALUE_WRITE
self.state = CLEAR
def writeStructBegin(self, name):
assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_WRITE
self.__last_fid = 0
def writeStructEnd(self):
assert self.state == FIELD_WRITE
self.state, self.__last_fid = self.__structs.pop()
def writeFieldStop(self):
self.__writeByte(0)
def __writeFieldHeader(self, type, fid):
delta = fid - self.__last_fid
if 0 < delta <= 15:
self.__writeUByte(delta << 4 | type)
else:
self.__writeByte(type)
self.__writeI16(fid)
self.__last_fid = fid
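# Illustrative example: writing fields 1, 2, 3 in sequence emits one header
# byte each (delta << 4 | type); a jump of more than 15 field ids falls back
# to a full type byte followed by the zigzag-encoded field id.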
def writeFieldBegin(self, name, type, fid):
assert self.state == FIELD_WRITE, self.state
if type == TType.BOOL:
self.state = BOOL_WRITE
self.__bool_fid = fid
else:
self.state = VALUE_WRITE
self.__writeFieldHeader(CTYPES[type], fid)
def writeFieldEnd(self):
assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state
self.state = FIELD_WRITE
def __writeUByte(self, byte):
self.trans.write(pack('!B', byte))
def __writeByte(self, byte):
self.trans.write(pack('!b', byte))
def __writeI16(self, i16):
self.__writeVarint(makeZigZag(i16, 16))
def __writeSize(self, i32):
self.__writeVarint(i32)
def writeCollectionBegin(self, etype, size):
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size <= 14:
self.__writeUByte(size << 4 | CTYPES[etype])
else:
self.__writeUByte(0xf0 | CTYPES[etype])
self.__writeSize(size)
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
writeSetBegin = writeCollectionBegin
writeListBegin = writeCollectionBegin
def writeMapBegin(self, ktype, vtype, size):
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size == 0:
self.__writeByte(0)
else:
self.__writeSize(size)
self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype])
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
def writeCollectionEnd(self):
assert self.state == CONTAINER_WRITE, self.state
self.state = self.__containers.pop()
writeMapEnd = writeCollectionEnd
writeSetEnd = writeCollectionEnd
writeListEnd = writeCollectionEnd
def writeBool(self, bool):
if self.state == BOOL_WRITE:
if bool:
ctype = CompactType.TRUE
else:
ctype = CompactType.FALSE
self.__writeFieldHeader(ctype, self.__bool_fid)
elif self.state == CONTAINER_WRITE:
if bool:
self.__writeByte(CompactType.TRUE)
else:
self.__writeByte(CompactType.FALSE)
else:
raise AssertionError("Invalid state in compact protocol")
writeByte = writer(__writeByte)
writeI16 = writer(__writeI16)
@writer
def writeI32(self, i32):
self.__writeVarint(makeZigZag(i32, 32))
@writer
def writeI64(self, i64):
self.__writeVarint(makeZigZag(i64, 64))
@writer
def writeDouble(self, dub):
self.trans.write(pack('<d', dub))
def __writeString(self, s):
self.__writeSize(len(s))
self.trans.write(s)
writeString = writer(__writeString)
def readFieldBegin(self):
assert self.state == FIELD_READ, self.state
type = self.__readUByte()
if type & 0x0f == TType.STOP:
return (None, 0, 0)
delta = type >> 4
if delta == 0:
fid = self.__readI16()
else:
fid = self.__last_fid + delta
self.__last_fid = fid
type = type & 0x0f
if type == CompactType.TRUE:
self.state = BOOL_READ
self.__bool_value = True
elif type == CompactType.FALSE:
self.state = BOOL_READ
self.__bool_value = False
else:
self.state = VALUE_READ
return (None, self.__getTType(type), fid)
def readFieldEnd(self):
assert self.state in (VALUE_READ, BOOL_READ), self.state
self.state = FIELD_READ
def __readUByte(self):
result, = unpack('!B', self.trans.readAll(1))
return result
def __readByte(self):
result, = unpack('!b', self.trans.readAll(1))
return result
def __readVarint(self):
return readVarint(self.trans)
def __readZigZag(self):
return fromZigZag(self.__readVarint())
def __readSize(self):
result = self.__readVarint()
if result < 0:
raise TException("Length < 0")
return result
def readMessageBegin(self):
assert self.state == CLEAR
proto_id = self.__readUByte()
if proto_id != self.PROTOCOL_ID:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad protocol id in the message: %d' % proto_id)
ver_type = self.__readUByte()
type = (ver_type >> self.TYPE_SHIFT_AMOUNT) & self.TYPE_BITS
version = ver_type & self.VERSION_MASK
if version != self.VERSION:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad version: %d (expect %d)' % (version, self.VERSION))
seqid = self.__readVarint()
name = self.__readString()
return (name, type, seqid)
def readMessageEnd(self):
assert self.state == CLEAR
assert len(self.__structs) == 0
def readStructBegin(self):
assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_READ
self.__last_fid = 0
def readStructEnd(self):
assert self.state == FIELD_READ
self.state, self.__last_fid = self.__structs.pop()
def readCollectionBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size_type = self.__readUByte()
size = size_type >> 4
type = self.__getTType(size_type)
if size == 15:
size = self.__readSize()
self.__containers.append(self.state)
self.state = CONTAINER_READ
return type, size
readSetBegin = readCollectionBegin
readListBegin = readCollectionBegin
def readMapBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size = self.__readSize()
types = 0
if size > 0:
types = self.__readUByte()
vtype = self.__getTType(types)
ktype = self.__getTType(types >> 4)
self.__containers.append(self.state)
self.state = CONTAINER_READ
return (ktype, vtype, size)
def readCollectionEnd(self):
assert self.state == CONTAINER_READ, self.state
self.state = self.__containers.pop()
readSetEnd = readCollectionEnd
readListEnd = readCollectionEnd
readMapEnd = readCollectionEnd
def readBool(self):
if self.state == BOOL_READ:
return self.__bool_value == CompactType.TRUE
elif self.state == CONTAINER_READ:
return self.__readByte() == CompactType.TRUE
else:
raise AssertionError("Invalid state in compact protocol: %d" %
self.state)
readByte = reader(__readByte)
__readI16 = __readZigZag
readI16 = reader(__readZigZag)
readI32 = reader(__readZigZag)
readI64 = reader(__readZigZag)
@reader
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('<d', buff)
return val
def __readString(self):
len = self.__readSize()
return self.trans.readAll(len)
readString = reader(__readString)
def __getTType(self, byte):
return TTYPES[byte & 0x0f]
class TCompactProtocolFactory:
def __init__(self):
pass
def getProtocol(self, trans):
return TCompactProtocol(trans)
| apache-2.0 |
IptvBrasilGroup/Cleitonleonelcreton.repository | plugin.video.iptvbrondemand.mobile/requestsX/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851 / (1 - 0.74851) = 2.98
# Random Distribution Ratio (RDR) = 512 / (5401 - 512) = 0.105
#
# The typical distribution ratio is about 25% of the ideal one, yet still
# much higher than the RDR.
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
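# A minimal sketch (not part of chardet) of how a ratio like the one above
# can be computed from observed text, assuming `hits` maps each character's
# frequency order to its occurrence count; the cumulative-coverage figures
# quoted above (e.g. the 1024 most frequent characters covering ~89.4% of
# text) are what make the frequent/rare split discriminative:
#
#   def distribution_ratio(hits, cutoff=512):
#     frequent = sum(n for order, n in hits.items() if order < cutoff)
#     rare = sum(n for order, n in hits.items() if order >= cutoff)
#     return float(frequent) / rare if rare else float('inf')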
# Char-to-FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| gpl-2.0 |
hunch/hunch-sample-app | django/utils/version.py | 13 | 1403 | import django
import os.path
import re
def get_svn_revision(path=None):
"""
Returns the SVN revision in the form SVN-XXXX,
where XXXX is the revision number.
Returns SVN-unknown if anything goes wrong, such as an unexpected
format of internal SVN files.
If path is provided, it should be a directory whose SVN info you want to
inspect. If it's not provided, this will use the root django/ package
directory.
"""
rev = None
if path is None:
path = django.__path__[0]
entries_path = '%s/.svn/entries' % path
try:
entries = open(entries_path, 'r').read()
except IOError:
pass
else:
# Versions >= 7 of the entries file are flat text. The first line is
# the version number. The next set of digits after 'dir' is the revision.
if re.match(r'(\d+)', entries):
rev_match = re.search(r'\d+\s+dir\s+(\d+)', entries)
if rev_match:
rev = rev_match.groups()[0]
# Older XML versions of the file specify revision as an attribute of
# the first entries node.
else:
from xml.dom import minidom
dom = minidom.parse(entries_path)
rev = dom.getElementsByTagName('entry')[0].getAttribute('revision')
if rev:
return u'SVN-%s' % rev
return u'SVN-unknown'
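# Hedged usage sketch (not part of the original module); the checkout
# path below is illustrative only.
if __name__ == '__main__':
    # Prints e.g. 'SVN-12345' for an SVN checkout, 'SVN-unknown' otherwise.
    print(get_svn_revision())
    print(get_svn_revision('/path/to/django/checkout'))  # hypothetical path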
| mit |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pkg_resources/_vendor/packaging/requirements.py | 454 | 4355 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found; users should refer to PEP 508.
"""
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
NAMED_REQUIREMENT = \
NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
class Requirement(object):
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string):
try:
req = REQUIREMENT.parseString(requirement_string)
except ParseException as e:
raise InvalidRequirement(
"Invalid requirement, parse error at \"{0!r}\"".format(
requirement_string[e.loc:e.loc + 8]))
self.name = req.name
if req.url:
parsed_url = urlparse.urlparse(req.url)
if not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc):
raise InvalidRequirement("Invalid URL given")
self.url = req.url
else:
self.url = None
self.extras = set(req.extras.asList() if req.extras else [])
self.specifier = SpecifierSet(req.specifier)
self.marker = req.marker if req.marker else None
def __str__(self):
parts = [self.name]
if self.extras:
parts.append("[{0}]".format(",".join(sorted(self.extras))))
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append("@ {0}".format(self.url))
if self.marker:
parts.append("; {0}".format(self.marker))
return "".join(parts)
def __repr__(self):
return "<Requirement({0!r})>".format(str(self))
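# Hedged usage sketch (not part of the vendored file); the requirement
# string below is illustrative.
if __name__ == '__main__':
    req = Requirement('requests[security]>=2.8.1; python_version < "2.7"')
    print(req.name)       # requests
    print(req.extras)     # set(['security']) on Python 2
    print(req.specifier)  # >=2.8.1
    print(req.marker)     # python_version < "2.7"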
| mit |
sudheerchintala/LearnEraPlatForm | common/lib/xmodule/xmodule/tests/test_video.py | 18 | 24534 | # -*- coding: utf-8 -*-
# pylint: disable=W0212
"""Test for Video Xmodule functional logic.
These tests read their data from XML, not from Mongo.
We have a ModuleStoreTestCase class defined in
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py. You can
search for usages of this in the cms and lms tests for examples. You use
this so that it will do things like point the modulestore setting to mongo,
flush the contentstore before and after, load the templates, etc.
You can then use the CourseFactory and XModuleItemFactory as defined
in common/lib/xmodule/xmodule/modulestore/tests/factories.py to create
the course, section, subsection, unit, etc.
"""
import unittest
import datetime
from mock import Mock, patch
from . import LogicTest
from lxml import etree
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.video_module import VideoDescriptor, create_youtube_string, get_video_from_cdn
from .test_import import DummySystem
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
def instantiate_descriptor(**field_data):
"""
Instantiate a VideoDescriptor with the given field data.
"""
system = get_test_descriptor_system()
course_key = SlashSeparatedCourseKey('org', 'course', 'run')
usage_key = course_key.make_usage_key('video', 'SampleProblem')
return system.construct_xblock_from_class(
VideoDescriptor,
scope_ids=ScopeIds(None, None, usage_key, usage_key),
field_data=DictFieldData(field_data),
)
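# Hedged usage sketch (not in the original file): field_data keys map
# directly onto VideoDescriptor fields.
#
#   descriptor = instantiate_descriptor(data='<video />')
#   descriptor.show_captions  # -> True (the field default)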
class VideoModuleTest(LogicTest):
"""Logic tests for Video Xmodule."""
descriptor_class = VideoDescriptor
raw_field_data = {
'data': '<video />'
}
def test_parse_youtube(self):
"""Test parsing old-style Youtube ID strings into a dict."""
youtube_str = '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': 'ZwkTiUPN0mg',
'1.25': 'rsq9auxASqI',
'1.50': 'kMyNdzVHHgg'})
def test_parse_youtube_one_video(self):
"""
Ensure that all keys are present and missing speeds map to the
empty string.
"""
youtube_str = '0.75:jNCf2gIqpeE'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': '',
'1.25': '',
'1.50': ''})
def test_parse_youtube_invalid(self):
"""Ensure that ids that are invalid return an empty dict"""
# invalid id
youtube_str = 'thisisaninvalidid'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''})
# another invalid id
youtube_str = ',::,:,,'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''})
# and another one, partially invalid
youtube_str = '0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': '',
'1.00': 'AXdE34_U',
'1.25': 'KLHF9K_Y',
'1.50': 'VO3SxfeD'})
def test_parse_youtube_key_format(self):
"""
Make sure that inconsistent speed keys are parsed correctly.
"""
youtube_str = '1.00:p2Q6BrNhdh8'
youtube_str_hack = '1.0:p2Q6BrNhdh8'
self.assertEqual(
VideoDescriptor._parse_youtube(youtube_str),
VideoDescriptor._parse_youtube(youtube_str_hack)
)
def test_parse_youtube_empty(self):
"""
Some courses have empty youtube attributes, so we should handle
that well.
"""
self.assertEqual(
VideoDescriptor._parse_youtube(''),
{'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''}
)
class VideoDescriptorTestBase(unittest.TestCase):
"""
Base class for tests for VideoDescriptor
"""
def setUp(self):
self.descriptor = instantiate_descriptor()
class TestCreateYoutubeString(VideoDescriptorTestBase):
"""
Checks that create_youtube_string correcty extracts information from Video descriptor.
"""
def test_create_youtube_string(self):
"""
Test that Youtube ID strings are correctly created when writing back out to XML.
"""
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
expected = "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8"
self.assertEqual(create_youtube_string(self.descriptor), expected)
def test_create_youtube_string_missing(self):
"""
Test that Youtube IDs which aren't explicitly set aren't included in the output string.
"""
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
expected = "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
self.assertEqual(create_youtube_string(self.descriptor), expected)
class VideoDescriptorImportTestCase(unittest.TestCase):
"""
Make sure that VideoDescriptor can import an old XML-based video correctly.
"""
def assert_attributes_equal(self, video, attrs):
"""
Assert that `video` has the correct attributes. `attrs` is a map of {metadata_field: value}.
"""
for key, value in attrs.items():
self.assertEquals(getattr(video, key), value)
def test_constructor(self):
sample_xml = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="true"
download_video="true"
start_time="00:00:01"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ua" src="ukrainian_translation.srt" />
<transcript language="ge" src="german_translation.srt" />
</video>
'''
descriptor = instantiate_descriptor(data=sample_xml)
self.assert_attributes_equal(descriptor, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'download_video': True,
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg'],
'data': '',
'transcripts': {'ua': 'ukrainian_translation.srt', 'ge': 'german_translation.srt'}
})
def test_from_xml(self):
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="uk" src="ukrainian_translation.srt" />
<transcript language="de" src="german_translation.srt" />
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
'transcripts': {'uk': 'ukrainian_translation.srt', 'de': 'german_translation.srt'},
})
def test_from_xml_missing_attributes(self):
"""
Ensure that attributes have the right values if they aren't
explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
show_captions="true">
<source src="http://www.example.com/source.mp4"/>
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
def test_from_xml_missing_download_track(self):
"""
Ensure that attributes have the right values if they aren't
explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
show_captions="true">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': 'http://www.example.com/track',
'download_track': True,
'download_video': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
'transcripts': {},
})
def test_from_xml_no_attributes(self):
"""
Make sure settings are correct if none are explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '<video></video>'
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'OEoXaMPEzfM',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': False,
'html5_sources': [],
'data': '',
'transcripts': {},
})
def test_from_xml_double_quotes(self):
"""
Make sure we can handle the double-quoted string format (which was used for exporting for
a few weeks).
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name=""display_name""
html5_sources="["source_1", "source_2"]"
show_captions="false"
download_video="true"
sub=""html5_subtitles""
track=""http://www.example.com/track""
handout=""http://www.example.com/handout""
download_track="true"
youtube_id_0_75=""OEoXaMPEzf65""
youtube_id_1_25=""OEoXaMPEzf125""
youtube_id_1_5=""OEoXaMPEzf15""
youtube_id_1_0=""OEoXaMPEzf10""
/>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'OEoXaMPEzf65',
'youtube_id_1_0': 'OEoXaMPEzf10',
'youtube_id_1_25': 'OEoXaMPEzf125',
'youtube_id_1_5': 'OEoXaMPEzf15',
'show_captions': False,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': True,
'download_video': True,
'html5_sources': ["source_1", "source_2"],
'data': ''
})
def test_from_xml_double_quote_concatenated_youtube(self):
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:"p2Q6BrNhdh8",1.25:"1EeWXzPdhSA"">
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': False,
'html5_sources': [],
'data': ''
})
def test_old_video_format(self):
"""
Test backwards compatibility with VideoModule's XML format.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
source="http://www.example.com/source.mp4"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
# 'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
})
def test_old_video_data(self):
"""
Ensure that Video is able to read VideoModule's model data.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
video = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(video, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
# 'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
def test_import_with_float_times(self):
"""
Ensure that Video is able to read VideoModule's model data.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="1.0"
to="60.0">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
video = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(video, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
# 'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
class VideoExportTestCase(VideoDescriptorTestBase):
"""
Make sure that VideoDescriptor can export itself to XML correctly.
"""
def assertXmlEqual(self, expected, xml):
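"""Recursively compare tag, attrib, text and tail of two XML trees."""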
for attr in ['tag', 'attrib', 'text', 'tail']:
self.assertEqual(getattr(expected, attr), getattr(xml, attr))
for left, right in zip(expected, xml):
self.assertXmlEqual(left, right)
def test_export_to_xml(self):
"""
Test that we write the correct XML on export.
"""
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
self.descriptor.show_captions = False
self.descriptor.start_time = datetime.timedelta(seconds=1.0)
self.descriptor.end_time = datetime.timedelta(seconds=60)
self.descriptor.track = 'http://www.example.com/track'
self.descriptor.handout = 'http://www.example.com/handout'
self.descriptor.download_track = True
self.descriptor.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']
self.descriptor.download_video = True
self.descriptor.transcripts = {'ua': 'ukrainian_translation.srt', 'ge': 'german_translation.srt'}
xml = self.descriptor.definition_to_xml(None) # We don't use the `resource_fs` parameter
expected = etree.fromstring('''\
<video url_name="SampleProblem" start_time="0:00:01" youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8" show_captions="false" end_time="0:01:00" download_video="true" download_track="true">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ge" src="german_translation.srt" />
<transcript language="ua" src="ukrainian_translation.srt" />
</video>
''')
self.assertXmlEqual(expected, xml)
def test_export_to_xml_empty_end_time(self):
"""
Test that we write the correct XML on export.
"""
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
self.descriptor.show_captions = False
self.descriptor.start_time = datetime.timedelta(seconds=5.0)
self.descriptor.end_time = datetime.timedelta(seconds=0.0)
self.descriptor.track = 'http://www.example.com/track'
self.descriptor.download_track = True
self.descriptor.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']
self.descriptor.download_video = True
xml = self.descriptor.definition_to_xml(None) # We don't use the `resource_fs` parameter
expected = etree.fromstring('''\
<video url_name="SampleProblem" start_time="0:00:05" youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8" show_captions="false" download_video="true" download_track="true">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
</video>
''')
self.assertXmlEqual(expected, xml)
def test_export_to_xml_empty_parameters(self):
"""
Test XML export with defaults.
"""
xml = self.descriptor.definition_to_xml(None)
expected = '<video url_name="SampleProblem"/>\n'
self.assertEquals(expected, etree.tostring(xml, pretty_print=True))
class VideoCdnTest(unittest.TestCase):
"""
Tests for Video CDN.
"""
@patch('requests.get')
def test_get_video_success(self, cdn_response):
"""
Test successful CDN request.
"""
original_video_url = "http://www.original_video.com/original_video.mp4"
cdn_response_video_url = "http://www.cdn_video.com/cdn_video.mp4"
cdn_response_content = '{{"sources":["{cdn_url}"]}}'.format(cdn_url=cdn_response_video_url)
cdn_response.return_value = Mock(status_code=200, content=cdn_response_content)
fake_cdn_url = 'http://fake_cdn.com/'
self.assertEqual(
get_video_from_cdn(fake_cdn_url, original_video_url),
cdn_response_video_url
)
@patch('requests.get')
def test_get_no_video_exists(self, cdn_response):
"""
Test if no alternative video in CDN exists.
"""
original_video_url = "http://www.original_video.com/original_video.mp4"
cdn_response.return_value = Mock(status_code=404)
fake_cdn_url = 'http://fake_cdn.com/'
self.assertIsNone(get_video_from_cdn(fake_cdn_url, original_video_url))
| agpl-3.0 |
chalasr/Flask-P2P | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
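# Hedged usage sketch (not part of the vendored module): feed() and
# get_confidence() are inherited from MultiByteCharSetProber; the byte
# string below is an arbitrary placeholder.
if __name__ == '__main__':
    prober = EUCKRProber()
    prober.feed(b'\xb0\xa1\xb0\xa2')  # hypothetical EUC-KR bytes
    print(prober.get_charset_name())  # EUC-KR
    print(prober.get_confidence())    # a float between 0.0 and 1.0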
| mit |
witwall/gyp | test/win/gyptest-link-defrelink.py | 210 | 1683 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure a relink is performed when a .def file is touched.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
target = 'test_deffile_dll_ok'
def_contents = test.read('linker-flags/deffile.def')
# This first build makes sure everything is up to date.
test.run_gyp('deffile.gyp', chdir=CHDIR)
test.build('deffile.gyp', target, chdir=CHDIR)
test.up_to_date('deffile.gyp', target, chdir=CHDIR)
def HasExport(binary, export):
full_path = test.built_file_path(binary, chdir=CHDIR)
output = test.run_dumpbin('/exports', full_path)
return export in output
# Verify that only one function is exported.
if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
test.fail_test()
if HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
test.fail_test()
# Add AnotherExportedFunction to the def file, then rebuild. If it doesn't
# relink the DLL, then the subsequent check for AnotherExportedFunction will
# fail.
new_def_contents = def_contents + "\n AnotherExportedFunction"
test.write('linker-flags/deffile.def', new_def_contents)
test.build('deffile.gyp', target, chdir=CHDIR)
test.up_to_date('deffile.gyp', target, chdir=CHDIR)
if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
test.fail_test()
if not HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
test.fail_test()
test.pass_test()
| bsd-3-clause |
saurvs/servo | python/servo/bootstrapper/base.py | 29 | 2033 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
import distutils.spawn
import subprocess
class BaseBootstrapper(object):
"""Base class for system bootstrappers."""
def __init__(self, interactive=False):
self.package_manager_updated = False
self.interactive = interactive
def ensure_system_packages(self):
'''
Check for missing packages.
'''
raise NotImplementedError('%s must implement ensure_system_packages()' %
__name__)
def install_system_packages(self):
'''
Install packages required to build Servo.
'''
raise NotImplementedError('%s must implement install_system_packages()' %
__name__)
def install_mobile_android_packages(self):
'''
Install packages required to build Servo for Android.
'''
raise NotImplementedError('Cannot bootstrap Servo for Android: '
'%s does not yet implement install_mobile_android_packages()'
% __name__)
def which(self, name):
"""Python implementation of which.
It returns the path of an executable or None if it couldn't be found.
"""
return distutils.spawn.find_executable(name)
def check_output(self, *args, **kwargs):
"""Run subprocess.check_output."""
return subprocess.check_output(*args, **kwargs)
def _ensure_package_manager_updated(self):
if self.package_manager_updated:
return
self._update_package_manager()
self.package_manager_updated = True
def _update_package_manager(self):
"""Updates the package manager's manifests/package list.
This should be defined in child classes.
"""
| mpl-2.0 |
zverevalexei/trex-http-proxy | trex_client/stl/profiles/udp_1pkt_mpls_vm.py | 3 | 1076 | from trex_stl_lib.api import *
from scapy.contrib.mpls import * # import from contrib folder of scapy
class STLS1(object):
def __init__(self):
pass
def create_stream(self):
# Two MPLS labels; the inner label has s=1 (bottom of stack)
pkt = Ether()/MPLS(label=17,cos=1,s=0,ttl=255)/MPLS(label=0,cos=1,s=1,ttl=12)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('x'*20)
vm = STLScVmRaw( [ STLVmFlowVar(name="mlabel", min_value=1, max_value=2000, size=2, op="inc"), # 2 bytes var
STLVmWrMaskFlowVar(fv_name="mlabel", pkt_offset= "MPLS:1.label",pkt_cast_size=4, mask=0xFFFFF000,shift=12) # write to 20bit MSB
]
)
# burst of 100 packets
return STLStream(packet = STLPktBuilder(pkt = pkt ,vm = vm),
mode = STLTXSingleBurst( pps = 1, total_pkts = 100) )
def get_streams (self, direction = 0, **kwargs):
# create 1 stream
return [ self.create_stream() ]
def register():
return STLS1()
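# Hedged usage sketch (not part of the profile): the TRex console loads
# this module and calls register(); get_streams() can also be inspected
# directly.
if __name__ == '__main__':
    profile = register()
    for stream in profile.get_streams():
        print(stream)  # one single-burst stream with the MPLS label VM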
| mit |
cga-harvard/cga-worldmap | geonode/contrib/dataverse_layer_metadata/layer_metadata_helper.py | 1 | 7520 | """
Convenience methods to:
1 - "check_for_existing_layer" using DataverseInfo
2 - "retrieve_dataverse_layer_metadata_by_kwargs_installation_and_file_id"
- Retrieve a DataverseLayerMetadata object by DV installation and file_id
3 - add_dataverse_layer_metadata
- Create a DataverseLayerMetadata using a Layer object and DataverseInfo object
4 - link_layer_permissions
- Given a new DataverseLayerMetadata, check if any additional
WorldMap users should have edit permissions (via PermissionLinker)
"""
from __future__ import print_function
import logging
from django import forms
from shared_dataverse_information.shapefile_import.forms import ShapefileImportDataForm
from geonode.contrib.dataverse_layer_metadata.models import DataverseLayerMetadata
from geonode.contrib.dataverse_layer_metadata.forms import DataverseLayerMetadataValidationForm
from geonode.contrib.dataverse_permission_links.permission_linker import PermissionLinker
from geonode.maps.models import Layer
LOGGER = logging.getLogger("geonode.dataverse_layer_metadata.layer_metadata_helper")
def check_for_existing_layer(dataverse_info):
"""
Using the dataverse_info information
Do the datafile_id and dataverse_installation_name
in dataverse_info match an existing DataverseLayerMetadata object?
Yes: return first matching DataverseLayerMetadata object
No: return None
"""
assert isinstance(dataverse_info, dict)\
, "dataverse_info must be an instance of a dict. Found type: %s" % type(dataverse_info)
# Validate the data
f = DataverseLayerMetadataValidationForm(dataverse_info)
if not f.is_valid():
LOGGER.error('check_for_existing_layer. failed validation')
LOGGER.error('Errors: %s' % f.errors)
raise forms.ValidationError('Failed to validate dataverse_info data')
# Check for DataverseLayerMetadata objects with
# the same "datafile_id" AND "dataverse_installation_name"
l = DataverseLayerMetadata.objects.filter(\
datafile_id=f.cleaned_data.get('datafile_id'),
dataverse_installation_name=f.cleaned_data.get('dataverse_installation_name'))
# If DataverseLayerMetadata objects match, return the 1st one
    # (Note: Not ~yet~ enforcing only 1 DataverseLayerMetadata per datafile_id,
    #  but only one layer is made 'in practice'.
    #  This is a late-stage change; original discussions included multiple layers per file.
#
if l.count() > 0:
return l[0]
return None
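# Hedged convenience sketch (an illustrative addition, not part of the
# original API): a boolean wrapper over the retrieval helper defined just
# below; the name resolves at call time, so the forward reference is safe.
def example_dataverse_layer_exists(datafile_id, dataverse_installation_name):
    """Return True if a matching DataverseLayerMetadata already exists."""
    return retrieve_dataverse_layer_metadata_by_installation_and_file_id(
        datafile_id, dataverse_installation_name) is not None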
def retrieve_dataverse_layer_metadata_by_kwargs_installation_and_file_id(**kwargs):
"""Retrieve Dataverse Layer based on installation id and file id"""
if kwargs is None:
return None
datafile_id = kwargs.get('datafile_id', None)
dataverse_installation_name = kwargs.get('dataverse_installation_name', None)
return retrieve_dataverse_layer_metadata_by_installation_and_file_id(\
datafile_id, dataverse_installation_name)
def retrieve_dataverse_layer_metadata_by_installation_and_file_id(\
datafile_id, dataverse_installation_name):
"""
    Retrieve a DataverseLayerMetadata object identified by:
        - datafile_id
        - dataverse_installation_name
    :param datafile_id: id of the Dataverse datafile
    :param dataverse_installation_name: name of the Dataverse installation
    :return: DataverseLayerMetadata object or None
"""
if datafile_id is None or dataverse_installation_name is None:
return None
l = DataverseLayerMetadata.objects.filter(\
datafile_id=datafile_id,
dataverse_installation_name=dataverse_installation_name)
# If DataverseLayerMetadata objects match, return the 1st one
# - Should only be 1 layer
#
if l.count() > 0:
return l[0]
return None
def update_the_layer_metadata(dv_layer_metadata, dataverse_info):
"""Update the DataverseLayerMetadata object with given DataverseInfo"""
assert type(dv_layer_metadata) is DataverseLayerMetadata,\
('dv_layer_metadata must be a DataverseLayerMetadata'
' object. Found type: %s' % type(dv_layer_metadata))
assert type(dataverse_info) is dict,\
"dataverse_info must be type dict. Found type: %s" % type(dataverse_info)
# Validate the data
f = DataverseLayerMetadataValidationForm(dataverse_info)
if not f.is_valid():
raise forms.ValidationError('Failed to validate dataverse_info data')
# Update the metadata, field by field
for k, v in f.cleaned_data.items():
setattr(dv_layer_metadata, k, v)
dv_layer_metadata.save()
# Update the Layer object title and abstract
# Using the 'ShapefileImportDataForm' is a bit redundant,
# but not sure where updates will arise in the future
#
f2 = ShapefileImportDataForm(dataverse_info)
if not f2.is_valid():
raise forms.ValidationError('Failed to validate form_shapefile_import data')
dv_layer_metadata.map_layer.abstract = f2.cleaned_data['abstract']
dv_layer_metadata.map_layer.title = f2.cleaned_data['title']
dv_layer_metadata.map_layer.save()
def add_dataverse_layer_metadata(saved_layer, dataverse_info):
"""
If a Layer has been created via Dataverse, create a DataverseLayerMetadata object.
fail: return None
success: return DataverseLayerMetadata object
"""
assert type(saved_layer) is Layer,\
"saved_layer must be type Layer. Found: %s" % type(Layer)
assert type(dataverse_info) is dict,\
"dataverse_info must be type dict. Found type: %s" % type(dataverse_info)
(success, create_datetime_obj_or_err_str) =\
DataverseLayerMetadataValidationForm.format_datafile_create_datetime(\
dataverse_info.get('datafile_create_datetime', None))
if success is False:
print ('failed to format datetime', create_datetime_obj_or_err_str)
LOGGER.error('Invalid "datafile_create_datetime"\n%s', create_datetime_obj_or_err_str)
return None
dataverse_info['datafile_create_datetime'] = create_datetime_obj_or_err_str
f = DataverseLayerMetadataValidationForm(dataverse_info)
if not f.is_valid():
print ('failed validation')
print (f.errors)
LOGGER.error(('Unexpected form validation error'
' in add_dataverse_layer_metadata. dvn import: %s'),\
f.errors)
return None
# Create the DataverseLayerMetadata object
layer_metadata = f.save(commit=False)
# Add the related Layer object
layer_metadata.map_layer = saved_layer
# Save it!!
layer_metadata.save()
is_linked, err_msg_or_None = link_layer_permissions(layer_metadata)
print(is_linked, err_msg_or_None)
return layer_metadata
def link_layer_permissions(layer_metadata):
"""
For Dataverse-created layers:
Use the PermissionLinker to see if additional WorldMap
users should be given edit permissions
Returns (boolean, error message or None)
- It works: return (True, None)
- It fails: return (False, "[error message]")
"""
if layer_metadata is None:
err_msg = "layer_metadata cannot be None"
LOGGER.error(err_msg)
return (False, err_msg)
layer_name = layer_metadata.map_layer.typename
dataverse_username = layer_metadata.dv_username
perm_linker = PermissionLinker(layer_name, dataverse_username)
if not perm_linker.link_layer():
        return (False, perm_linker.error_message)
return (True, None)
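# Hedged end-to-end sketch (illustrative only): the expected call order when
# a shapefile arrives from Dataverse. 'saved_layer' is a geonode Layer and
# 'dataverse_info' the metadata dict; both names are assumptions here.
#
#   existing = check_for_existing_layer(dataverse_info)
#   if existing:
#       update_the_layer_metadata(existing, dataverse_info)
#   else:
#       add_dataverse_layer_metadata(saved_layer, dataverse_info)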
| gpl-3.0 |
guedou/scapy-codecov | scapy/automaton.py | 2 | 27965 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Automata with states, transitions and actions.
"""
import types,itertools,time,os,sys,socket,traceback
from select import select
from collections import deque
import thread
from scapy.config import conf
from scapy.utils import do_graph
from scapy.error import log_interactive
from scapy.plist import PacketList
from scapy.data import MTU
from scapy.supersocket import SuperSocket
class ObjectPipe:
def __init__(self):
self.rd,self.wr = os.pipe()
self.queue = deque()
def fileno(self):
return self.rd
def send(self, obj):
self.queue.append(obj)
os.write(self.wr,"X")
def recv(self, n=0):
os.read(self.rd,1)
return self.queue.popleft()
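# Illustrative note (added): ObjectPipe pairs a deque with an OS pipe so that
# queued Python objects become select()-able; send() writes one marker byte,
# recv() consumes one. A hedged, single-threaded sketch:
#
#   p = ObjectPipe()
#   p.send({"msg": "hi"})
#   r, _, _ = select([p], [], [])   # p is immediately readable
#   obj = p.recv()                  # -> {"msg": "hi"}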
class Message:
def __init__(self, **args):
self.__dict__.update(args)
def __repr__(self):
return "<Message %s>" % " ".join("%s=%r"%(k,v)
for (k,v) in self.__dict__.iteritems()
if not k.startswith("_"))
class _instance_state:
def __init__(self, instance):
self.im_self = instance.im_self
self.im_func = instance.im_func
self.im_class = instance.im_class
def __getattr__(self, attr):
return getattr(self.im_func, attr)
def __call__(self, *args, **kargs):
return self.im_func(self.im_self, *args, **kargs)
def breaks(self):
return self.im_self.add_breakpoints(self.im_func)
def intercepts(self):
return self.im_self.add_interception_points(self.im_func)
def unbreaks(self):
return self.im_self.remove_breakpoints(self.im_func)
def unintercepts(self):
return self.im_self.remove_interception_points(self.im_func)
##############
## Automata ##
##############
class ATMT:
STATE = "State"
ACTION = "Action"
CONDITION = "Condition"
RECV = "Receive condition"
TIMEOUT = "Timeout condition"
IOEVENT = "I/O event"
class NewStateRequested(Exception):
def __init__(self, state_func, automaton, *args, **kargs):
self.func = state_func
self.state = state_func.atmt_state
self.initial = state_func.atmt_initial
self.error = state_func.atmt_error
self.final = state_func.atmt_final
Exception.__init__(self, "Request state [%s]" % self.state)
self.automaton = automaton
self.args = args
self.kargs = kargs
self.action_parameters() # init action parameters
def action_parameters(self, *args, **kargs):
self.action_args = args
self.action_kargs = kargs
return self
def run(self):
return self.func(self.automaton, *self.args, **self.kargs)
def __repr__(self):
return "NewStateRequested(%s)" % self.state
@staticmethod
def state(initial=0,final=0,error=0):
def deco(f,initial=initial, final=final):
f.atmt_type = ATMT.STATE
f.atmt_state = f.func_name
f.atmt_initial = initial
f.atmt_final = final
f.atmt_error = error
def state_wrapper(self, *args, **kargs):
return ATMT.NewStateRequested(f, self, *args, **kargs)
state_wrapper.func_name = "%s_wrapper" % f.func_name
state_wrapper.atmt_type = ATMT.STATE
state_wrapper.atmt_state = f.func_name
state_wrapper.atmt_initial = initial
state_wrapper.atmt_final = final
state_wrapper.atmt_error = error
state_wrapper.atmt_origfunc = f
return state_wrapper
return deco
@staticmethod
def action(cond, prio=0):
def deco(f,cond=cond):
if not hasattr(f,"atmt_type"):
f.atmt_cond = {}
f.atmt_type = ATMT.ACTION
f.atmt_cond[cond.atmt_condname] = prio
return f
return deco
@staticmethod
def condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.CONDITION
f.atmt_state = state.atmt_state
f.atmt_condname = f.func_name
f.atmt_prio = prio
return f
return deco
@staticmethod
def receive_condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.RECV
f.atmt_state = state.atmt_state
f.atmt_condname = f.func_name
f.atmt_prio = prio
return f
return deco
@staticmethod
def ioevent(state, name, prio=0, as_supersocket=None):
def deco(f, state=state):
f.atmt_type = ATMT.IOEVENT
f.atmt_state = state.atmt_state
f.atmt_condname = f.func_name
f.atmt_ioname = name
f.atmt_prio = prio
f.atmt_as_supersocket = as_supersocket
return f
return deco
@staticmethod
def timeout(state, timeout):
def deco(f, state=state, timeout=timeout):
f.atmt_type = ATMT.TIMEOUT
f.atmt_state = state.atmt_state
f.atmt_timeout = timeout
f.atmt_condname = f.func_name
return f
return deco
class _ATMT_Command:
RUN = "RUN"
NEXT = "NEXT"
FREEZE = "FREEZE"
STOP = "STOP"
END = "END"
EXCEPTION = "EXCEPTION"
SINGLESTEP = "SINGLESTEP"
BREAKPOINT = "BREAKPOINT"
INTERCEPT = "INTERCEPT"
ACCEPT = "ACCEPT"
REPLACE = "REPLACE"
REJECT = "REJECT"
class _ATMT_supersocket(SuperSocket):
def __init__(self, name, ioevent, automaton, proto, args, kargs):
self.name = name
self.ioevent = ioevent
self.proto = proto
self.spa,self.spb = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
kargs["external_fd"] = {ioevent:self.spb}
self.atmt = automaton(*args, **kargs)
self.atmt.runbg()
def fileno(self):
return self.spa.fileno()
def send(self, s):
if type(s) is not str:
s = str(s)
return self.spa.send(s)
def recv(self, n=MTU):
r = self.spa.recv(n)
if self.proto is not None:
r = self.proto(r)
return r
def close(self):
pass
class _ATMT_to_supersocket:
def __init__(self, name, ioevent, automaton):
self.name = name
self.ioevent = ioevent
self.automaton = automaton
def __call__(self, proto, *args, **kargs):
return _ATMT_supersocket(self.name, self.ioevent, self.automaton, proto, args, kargs)
class Automaton_metaclass(type):
def __new__(cls, name, bases, dct):
cls = super(Automaton_metaclass, cls).__new__(cls, name, bases, dct)
cls.states={}
cls.state = None
cls.recv_conditions={}
cls.conditions={}
cls.ioevents={}
cls.timeout={}
cls.actions={}
cls.initial_states=[]
cls.ionames = []
cls.iosupersockets = []
members = {}
classes = [cls]
while classes:
c = classes.pop(0) # order is important to avoid breaking method overloading
classes += list(c.__bases__)
for k,v in c.__dict__.iteritems():
if k not in members:
members[k] = v
decorated = [v for v in members.itervalues()
if type(v) is types.FunctionType and hasattr(v, "atmt_type")]
for m in decorated:
if m.atmt_type == ATMT.STATE:
s = m.atmt_state
cls.states[s] = m
cls.recv_conditions[s]=[]
cls.ioevents[s]=[]
cls.conditions[s]=[]
cls.timeout[s]=[]
if m.atmt_initial:
cls.initial_states.append(m)
elif m.atmt_type in [ATMT.CONDITION, ATMT.RECV, ATMT.TIMEOUT, ATMT.IOEVENT]:
cls.actions[m.atmt_condname] = []
for m in decorated:
if m.atmt_type == ATMT.CONDITION:
cls.conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.RECV:
cls.recv_conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.IOEVENT:
cls.ioevents[m.atmt_state].append(m)
cls.ionames.append(m.atmt_ioname)
if m.atmt_as_supersocket is not None:
cls.iosupersockets.append(m)
elif m.atmt_type == ATMT.TIMEOUT:
cls.timeout[m.atmt_state].append((m.atmt_timeout, m))
elif m.atmt_type == ATMT.ACTION:
for c in m.atmt_cond:
cls.actions[c].append(m)
for v in cls.timeout.itervalues():
v.sort(lambda (t1,f1),(t2,f2): cmp(t1,t2))
v.append((None, None))
for v in itertools.chain(cls.conditions.itervalues(),
cls.recv_conditions.itervalues(),
cls.ioevents.itervalues()):
v.sort(lambda c1,c2: cmp(c1.atmt_prio,c2.atmt_prio))
for condname,actlst in cls.actions.iteritems():
actlst.sort(lambda c1,c2: cmp(c1.atmt_cond[condname], c2.atmt_cond[condname]))
for ioev in cls.iosupersockets:
setattr(cls, ioev.atmt_as_supersocket, _ATMT_to_supersocket(ioev.atmt_as_supersocket, ioev.atmt_ioname, cls))
return cls
def graph(self, **kargs):
s = 'digraph "%s" {\n' % self.__class__.__name__
se = "" # Keep initial nodes at the begining for better rendering
for st in self.states.itervalues():
if st.atmt_initial:
se = ('\t"%s" [ style=filled, fillcolor=blue, shape=box, root=true];\n' % st.atmt_state)+se
elif st.atmt_final:
se += '\t"%s" [ style=filled, fillcolor=green, shape=octagon ];\n' % st.atmt_state
elif st.atmt_error:
se += '\t"%s" [ style=filled, fillcolor=red, shape=octagon ];\n' % st.atmt_state
s += se
for st in self.states.itervalues():
for n in st.atmt_origfunc.func_code.co_names+st.atmt_origfunc.func_code.co_consts:
if n in self.states:
s += '\t"%s" -> "%s" [ color=green ];\n' % (st.atmt_state,n)
for c,k,v in ([("purple",k,v) for k,v in self.conditions.items()]+
[("red",k,v) for k,v in self.recv_conditions.items()]+
[("orange",k,v) for k,v in self.ioevents.items()]):
for f in v:
for n in f.func_code.co_names+f.func_code.co_consts:
if n in self.states:
l = f.atmt_condname
for x in self.actions[f.atmt_condname]:
l += "\\l>[%s]" % x.func_name
s += '\t"%s" -> "%s" [label="%s", color=%s];\n' % (k,n,l,c)
for k,v in self.timeout.iteritems():
for t,f in v:
if f is None:
continue
for n in f.func_code.co_names+f.func_code.co_consts:
if n in self.states:
l = "%s/%.1fs" % (f.atmt_condname,t)
for x in self.actions[f.atmt_condname]:
l += "\\l>[%s]" % x.func_name
s += '\t"%s" -> "%s" [label="%s",color=blue];\n' % (k,n,l)
s += "}\n"
return do_graph(s, **kargs)
class Automaton:
__metaclass__ = Automaton_metaclass
## Methods to overload
def parse_args(self, debug=0, store=1, **kargs):
self.debug_level=debug
self.socket_kargs = kargs
self.store_packets = store
def master_filter(self, pkt):
return True
def my_send(self, pkt):
self.send_sock.send(pkt)
## Utility classes and exceptions
class _IO_fdwrapper:
def __init__(self,rd,wr):
if rd is not None and type(rd) is not int:
rd = rd.fileno()
if wr is not None and type(wr) is not int:
wr = wr.fileno()
self.rd = rd
self.wr = wr
def fileno(self):
return self.rd
def read(self, n=65535):
return os.read(self.rd, n)
def write(self, msg):
return os.write(self.wr,msg)
def recv(self, n=65535):
return self.read(n)
def send(self, msg):
return self.write(msg)
class _IO_mixer:
def __init__(self,rd,wr):
self.rd = rd
self.wr = wr
def fileno(self):
if type(self.rd) is int:
return self.rd
return self.rd.fileno()
def recv(self, n=None):
return self.rd.recv(n)
def read(self, n=None):
return self.rd.recv(n)
def send(self, msg):
return self.wr.send(msg)
def write(self, msg):
return self.wr.send(msg)
class AutomatonException(Exception):
def __init__(self, msg, state=None, result=None):
Exception.__init__(self, msg)
self.state = state
self.result = result
class AutomatonError(AutomatonException):
pass
class ErrorState(AutomatonException):
pass
class Stuck(AutomatonException):
pass
class AutomatonStopped(AutomatonException):
pass
class Breakpoint(AutomatonStopped):
pass
class Singlestep(AutomatonStopped):
pass
class InterceptionPoint(AutomatonStopped):
def __init__(self, msg, state=None, result=None, packet=None):
Automaton.AutomatonStopped.__init__(self, msg, state=state, result=result)
self.packet = packet
class CommandMessage(AutomatonException):
pass
## Services
def debug(self, lvl, msg):
if self.debug_level >= lvl:
log_interactive.debug(msg)
def send(self, pkt):
if self.state.state in self.interception_points:
self.debug(3,"INTERCEPT: packet intercepted: %s" % pkt.summary())
self.intercepted_packet = pkt
cmd = Message(type = _ATMT_Command.INTERCEPT, state=self.state, pkt=pkt)
self.cmdout.send(cmd)
cmd = self.cmdin.recv()
self.intercepted_packet = None
if cmd.type == _ATMT_Command.REJECT:
self.debug(3,"INTERCEPT: packet rejected")
return
elif cmd.type == _ATMT_Command.REPLACE:
pkt = cmd.pkt
self.debug(3,"INTERCEPT: packet replaced by: %s" % pkt.summary())
elif cmd.type == _ATMT_Command.ACCEPT:
self.debug(3,"INTERCEPT: packet accepted")
else:
raise self.AutomatonError("INTERCEPT: unkown verdict: %r" % cmd.type)
self.my_send(pkt)
self.debug(3,"SENT : %s" % pkt.summary())
if self.store_packets:
self.packets.append(pkt.copy())
## Internals
def __init__(self, *args, **kargs):
external_fd = kargs.pop("external_fd",{})
self.send_sock_class = kargs.pop("ll", conf.L3socket)
self.recv_sock_class = kargs.pop("recvsock", conf.L2listen)
self.started = thread.allocate_lock()
self.threadid = None
self.breakpointed = None
self.breakpoints = set()
self.interception_points = set()
self.intercepted_packet = None
self.debug_level=0
self.init_args=args
self.init_kargs=kargs
self.io = type.__new__(type, "IOnamespace",(),{})
self.oi = type.__new__(type, "IOnamespace",(),{})
self.cmdin = ObjectPipe()
self.cmdout = ObjectPipe()
self.ioin = {}
self.ioout = {}
for n in self.ionames:
extfd = external_fd.get(n)
if type(extfd) is not tuple:
extfd = (extfd,extfd)
ioin,ioout = extfd
if ioin is None:
ioin = ObjectPipe()
elif type(ioin) is not types.InstanceType:
ioin = self._IO_fdwrapper(ioin,None)
if ioout is None:
ioout = ObjectPipe()
elif type(ioout) is not types.InstanceType:
ioout = self._IO_fdwrapper(None,ioout)
self.ioin[n] = ioin
self.ioout[n] = ioout
ioin.ioname = n
ioout.ioname = n
setattr(self.io, n, self._IO_mixer(ioout,ioin))
setattr(self.oi, n, self._IO_mixer(ioin,ioout))
for stname in self.states:
setattr(self, stname,
_instance_state(getattr(self, stname)))
self.parse_args(*args, **kargs)
self.start()
def __iter__(self):
return self
def __del__(self):
self.stop()
def _run_condition(self, cond, *args, **kargs):
try:
self.debug(5, "Trying %s [%s]" % (cond.atmt_type, cond.atmt_condname))
cond(self,*args, **kargs)
except ATMT.NewStateRequested, state_req:
self.debug(2, "%s [%s] taken to state [%s]" % (cond.atmt_type, cond.atmt_condname, state_req.state))
if cond.atmt_type == ATMT.RECV:
if self.store_packets:
self.packets.append(args[0])
for action in self.actions[cond.atmt_condname]:
self.debug(2, " + Running action [%s]" % action.func_name)
action(self, *state_req.action_args, **state_req.action_kargs)
raise
except Exception,e:
self.debug(2, "%s [%s] raised exception [%s]" % (cond.atmt_type, cond.atmt_condname, e))
raise
else:
self.debug(2, "%s [%s] not taken" % (cond.atmt_type, cond.atmt_condname))
def _do_start(self, *args, **kargs):
thread.start_new_thread(self._do_control, args, kargs)
def _do_control(self, *args, **kargs):
with self.started:
self.threadid = thread.get_ident()
# Update default parameters
a = args+self.init_args[len(args):]
k = self.init_kargs.copy()
k.update(kargs)
self.parse_args(*a,**k)
# Start the automaton
self.state=self.initial_states[0](self)
self.send_sock = self.send_sock_class()
self.listen_sock = self.recv_sock_class(**self.socket_kargs)
self.packets = PacketList(name="session[%s]"%self.__class__.__name__)
singlestep = True
iterator = self._do_iter()
self.debug(3, "Starting control thread [tid=%i]" % self.threadid)
try:
while True:
c = self.cmdin.recv()
self.debug(5, "Received command %s" % c.type)
if c.type == _ATMT_Command.RUN:
singlestep = False
elif c.type == _ATMT_Command.NEXT:
singlestep = True
elif c.type == _ATMT_Command.FREEZE:
continue
elif c.type == _ATMT_Command.STOP:
break
while True:
state = iterator.next()
if isinstance(state, self.CommandMessage):
break
elif isinstance(state, self.Breakpoint):
c = Message(type=_ATMT_Command.BREAKPOINT,state=state)
self.cmdout.send(c)
break
if singlestep:
c = Message(type=_ATMT_Command.SINGLESTEP,state=state)
self.cmdout.send(c)
break
except StopIteration,e:
c = Message(type=_ATMT_Command.END, result=e.args[0])
self.cmdout.send(c)
except Exception,e:
exc_info = sys.exc_info()
self.debug(3, "Transfering exception from tid=%i:\n%s"% (self.threadid, traceback.format_exc(exc_info)))
m = Message(type=_ATMT_Command.EXCEPTION, exception=e, exc_info=exc_info)
self.cmdout.send(m)
self.debug(3, "Stopping control thread (tid=%i)"%self.threadid)
self.threadid = None
def _do_iter(self):
while True:
try:
self.debug(1, "## state=[%s]" % self.state.state)
# Entering a new state. First, call new state function
if self.state.state in self.breakpoints and self.state.state != self.breakpointed:
self.breakpointed = self.state.state
yield self.Breakpoint("breakpoint triggered on state %s" % self.state.state,
state = self.state.state)
self.breakpointed = None
state_output = self.state.run()
if self.state.error:
raise self.ErrorState("Reached %s: [%r]" % (self.state.state, state_output),
result=state_output, state=self.state.state)
if self.state.final:
raise StopIteration(state_output)
if state_output is None:
state_output = ()
elif type(state_output) is not list:
state_output = state_output,
# Then check immediate conditions
for cond in self.conditions[self.state.state]:
self._run_condition(cond, *state_output)
# If still there and no conditions left, we are stuck!
if ( len(self.recv_conditions[self.state.state]) == 0 and
len(self.ioevents[self.state.state]) == 0 and
len(self.timeout[self.state.state]) == 1 ):
raise self.Stuck("stuck in [%s]" % self.state.state,
state=self.state.state, result=state_output)
# Finally listen and pay attention to timeouts
expirations = iter(self.timeout[self.state.state])
next_timeout,timeout_func = expirations.next()
t0 = time.time()
fds = [self.cmdin]
if len(self.recv_conditions[self.state.state]) > 0:
fds.append(self.listen_sock)
for ioev in self.ioevents[self.state.state]:
fds.append(self.ioin[ioev.atmt_ioname])
while 1:
t = time.time()-t0
if next_timeout is not None:
if next_timeout <= t:
self._run_condition(timeout_func, *state_output)
next_timeout,timeout_func = expirations.next()
if next_timeout is None:
remain = None
else:
remain = next_timeout-t
self.debug(5, "Select on %r" % fds)
r,_,_ = select(fds,[],[],remain)
self.debug(5, "Selected %r" % r)
for fd in r:
self.debug(5, "Looking at %r" % fd)
if fd == self.cmdin:
yield self.CommandMessage("Received command message")
elif fd == self.listen_sock:
pkt = self.listen_sock.recv(MTU)
if pkt is not None:
if self.master_filter(pkt):
self.debug(3, "RECVD: %s" % pkt.summary())
for rcvcond in self.recv_conditions[self.state.state]:
self._run_condition(rcvcond, pkt, *state_output)
else:
self.debug(4, "FILTR: %s" % pkt.summary())
else:
self.debug(3, "IOEVENT on %s" % fd.ioname)
for ioevt in self.ioevents[self.state.state]:
if ioevt.atmt_ioname == fd.ioname:
self._run_condition(ioevt, fd, *state_output)
except ATMT.NewStateRequested,state_req:
self.debug(2, "switching from [%s] to [%s]" % (self.state.state,state_req.state))
self.state = state_req
yield state_req
## Public API
def add_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt,"atmt_state"):
ipt = ipt.atmt_state
self.interception_points.add(ipt)
def remove_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt,"atmt_state"):
ipt = ipt.atmt_state
self.interception_points.discard(ipt)
def add_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp,"atmt_state"):
bp = bp.atmt_state
self.breakpoints.add(bp)
def remove_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp,"atmt_state"):
bp = bp.atmt_state
self.breakpoints.discard(bp)
def start(self, *args, **kargs):
if not self.started.locked():
self._do_start(*args, **kargs)
def run(self, resume=None, wait=True):
if resume is None:
resume = Message(type = _ATMT_Command.RUN)
self.cmdin.send(resume)
if wait:
try:
c = self.cmdout.recv()
except KeyboardInterrupt:
self.cmdin.send(Message(type = _ATMT_Command.FREEZE))
return
if c.type == _ATMT_Command.END:
return c.result
elif c.type == _ATMT_Command.INTERCEPT:
raise self.InterceptionPoint("packet intercepted", state=c.state.state, packet=c.pkt)
elif c.type == _ATMT_Command.SINGLESTEP:
raise self.Singlestep("singlestep state=[%s]"%c.state.state, state=c.state.state)
elif c.type == _ATMT_Command.BREAKPOINT:
raise self.Breakpoint("breakpoint triggered on state [%s]"%c.state.state, state=c.state.state)
elif c.type == _ATMT_Command.EXCEPTION:
raise c.exc_info[0],c.exc_info[1],c.exc_info[2]
def runbg(self, resume=None, wait=False):
self.run(resume, wait)
def next(self):
return self.run(resume = Message(type=_ATMT_Command.NEXT))
def stop(self):
self.cmdin.send(Message(type=_ATMT_Command.STOP))
with self.started:
# Flush command pipes
while True:
r,_,_ = select([self.cmdin, self.cmdout],[],[],0)
if not r:
break
for fd in r:
fd.recv()
def restart(self, *args, **kargs):
self.stop()
self.start(*args, **kargs)
def accept_packet(self, pkt=None, wait=False):
rsm = Message()
if pkt is None:
rsm.type = _ATMT_Command.ACCEPT
else:
rsm.type = _ATMT_Command.REPLACE
rsm.pkt = pkt
return self.run(resume=rsm, wait=wait)
def reject_packet(self, wait=False):
rsm = Message(type = _ATMT_Command.REJECT)
return self.run(resume=rsm, wait=wait)
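# --- Hedged example (added for illustration; not part of scapy) --------------
# A minimal automaton wired with the ATMT decorators above: it starts in
# BEGIN, an immediate condition requests END, and run() returns the final
# state's value. Instantiating it opens sockets via conf.L3socket, which may
# require elevated privileges.
class HelloWorldATMT(Automaton):
    @ATMT.state(initial=1)
    def BEGIN(self):
        # Nothing to do on entry; conditions below decide where to go next.
        pass
    @ATMT.condition(BEGIN)
    def leave_begin(self):
        # Unconditionally request the transition to the final state.
        raise self.END()
    @ATMT.state(final=1)
    def END(self):
        # The final state's return value becomes the result of run().
        return "hello world"
# Typical usage:
#   HelloWorldATMT().run()   # -> "hello world"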
| gpl-2.0 |
zararah/memorial-page | submissions/admin.py | 4 | 1784 | from django.contrib import admin
from models import Submission, Image, Link
from django.core.exceptions import PermissionDenied
from django_object_actions import DjangoObjectActions
from django.contrib.admin import SimpleListFilter
from datetime import datetime
class ModerationFilter(SimpleListFilter):
title = 'Accepted'
parameter_name = 'accepted'
def lookups(self, request, model_admin):
return (('unsent', 'Noch nicht abgesendet',),
('sent_not_accepted', 'Abgeschickt und nicht akzeptiert',),
('sent_accepted', 'Abgeschickt und akzeptiert',),)
    def queryset(self, request, queryset):
        if self.value() is None:
            return queryset
        submitted_at__isnull = self.value() == 'unsent'
        accepted_at__isnull = self.value() != 'sent_accepted'
        return queryset.filter(accepted_at__isnull=accepted_at__isnull,
                               submitted_at__isnull=submitted_at__isnull)
class ImageInlineAdmin(admin.TabularInline):
model = Image
class LinkInlineAdmin(admin.TabularInline):
model = Link
class SubmissionAdmin(DjangoObjectActions, admin.ModelAdmin):
inlines = [ImageInlineAdmin,LinkInlineAdmin]
actions = ['approve']
list_filter = ('accepted_at',ModerationFilter)
def approve(self, request, queryset):
for x in queryset:
            self.approve_obj(request, x)
return None
def approve_obj(self, request, obj):
if not self.has_change_permission(request):
raise PermissionDenied
obj.accepted_at=datetime.now()
obj.accepted_by=request.user
obj.save()
self.message_user(request, "Successfully marked submissions as accepted.")
approve_obj.label = 'Approve'
objectactions = ['approve_obj']
admin.site.register(Submission, SubmissionAdmin)
| mit |
mixman/djangodev | django/conf/locale/en/formats.py | 318 | 1637 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = u'.'
THOUSAND_SEPARATOR = u','
NUMBER_GROUPING = 3
| bsd-3-clause |
jamesliu/mxnet | tests/python/quantization_gpu/test_quantization_gpu.py | 18 | 1144 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import mxnet as mx
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../quantization'))
from mxnet.test_utils import set_default_context
from test_quantization import *
set_default_context(mx.gpu(0))
if __name__ == '__main__':
import nose
nose.runmodule()
| apache-2.0 |
7fever/script.pseudotv.live | resources/lib/Globals.py | 2 | 20378 | # Copyright (C) 2013 Kevin S. Graer
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV Live is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV Live is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
import os, sys, re
import xbmcaddon, xbmc, xbmcgui, xbmcvfs
import Settings
from FileAccess import FileLock
# Commoncache plugin import
try:
import StorageServer
except Exception,e:
import storageserverdummy as StorageServer
def log(msg, level = xbmc.LOGDEBUG):
try:
xbmc.log(ADDON_ID + '-' + ascii(msg), level)
except Exception,e:
pass
def uni(string, encoding = 'utf-8'):
if isinstance(string, basestring):
if not isinstance(string, unicode):
string = unicode(string, encoding, errors='ignore')
return string
def utf(string):
if isinstance(string, basestring):
if isinstance(string, unicode):
string = string.encode( 'utf-8', 'ignore' )
return string
def ascii(string):
if isinstance(string, basestring):
if isinstance(string, unicode):
string = string.encode('ascii', 'ignore')
return string
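# Hedged examples (illustrative only) of the codec helpers above:
#   uni('caf\xc3\xa9')  -> u'caf\xe9'    (UTF-8 bytes decoded to unicode)
#   utf(u'caf\xe9')     -> 'caf\xc3\xa9' (unicode encoded back to UTF-8 bytes)
#   ascii(u'caf\xe9')   -> 'caf'         (non-ASCII characters are dropped)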
# Plugin Info
ADDON_ID = 'script.pseudotv.live'
REAL_SETTINGS = xbmcaddon.Addon(id=ADDON_ID)
ADDON_ID = REAL_SETTINGS.getAddonInfo('id')
ADDON_NAME = REAL_SETTINGS.getAddonInfo('name')
ADDON_PATH = (REAL_SETTINGS.getAddonInfo('path').decode('utf-8'))
ADDON_VERSION = REAL_SETTINGS.getAddonInfo('version')
xbmc.log(ADDON_ID +' '+ ADDON_NAME +' '+ ADDON_PATH +' '+ ADDON_VERSION)
# API Keys
TVDB_API_KEY = '078845CE15BC08A7'
TMDB_API_KEY = '9c47d05a3f5f3a00104f6586412306af'
FANARTTV_API_KEY = '7bc4161cc4add99b14e51eddcdd5b985'
# Timers
AUTOSTART_TIMER = [0,5,10,15,20]#in seconds
ART_TIMER = [6,12,24,48,72]
SHORT_CLIP_ENUM = [15,30,60,90,120,240,360,480]#in seconds
INFOBAR_TIMER = [3,5,10,15,20,25]#in seconds
MEDIA_LIMIT = [25,50,100,250,500,1000,0]#Media Per/Channel, 0 = Unlimited
REFRESH_INT = [14400,28800,43200,86400]#in seconds (4|8|12|24hrs)
TIMEOUT = 15 * 1000
TOTAL_FILL_CHANNELS = 20
PREP_CHANNEL_TIME = 60 * 60 * 24 * 5
ALLOW_CHANNEL_HISTORY_TIME = 60 * 60 * 24 * 1
NOTIFICATION_CHECK_TIME = 5
NOTIFICATION_TIME_BEFORE_END = 240
NOTIFICATION_DISPLAY_TIME = 8
# Rules/Modes
RULES_ACTION_START = 1
RULES_ACTION_JSON = 2
RULES_ACTION_LIST = 4
RULES_ACTION_BEFORE_CLEAR = 8
RULES_ACTION_BEFORE_TIME = 16
RULES_ACTION_FINAL_MADE = 32
RULES_ACTION_FINAL_LOADED = 64
RULES_ACTION_OVERLAY_SET_CHANNEL = 128
RULES_ACTION_OVERLAY_SET_CHANNEL_END = 256
MODE_RESUME = 1
MODE_ALWAYSPAUSE = 2
MODE_ORDERAIRDATE = 4
MODE_RANDOM = 8
MODE_REALTIME = 16
MODE_SERIAL = MODE_RESUME | MODE_ALWAYSPAUSE | MODE_ORDERAIRDATE
MODE_STARTMODES = MODE_RANDOM | MODE_REALTIME | MODE_RESUME
# Maximum is 10 for this
RULES_PER_PAGE = 7
#UPNP Clients
IPP1 = (REAL_SETTINGS.getSetting("UPNP1_IPP"))
IPP2 = (REAL_SETTINGS.getSetting("UPNP2_IPP"))
IPP3 = (REAL_SETTINGS.getSetting("UPNP3_IPP"))
#LOCATIONS
SETTINGS_LOC = REAL_SETTINGS.getAddonInfo('profile') #LOCKED
CHANNELS_LOC = os.path.join(SETTINGS_LOC, 'cache') + '/' #LOCKED
MADE_CHAN_LOC = os.path.join(CHANNELS_LOC, 'stored') + '/' #LOCKED
GEN_CHAN_LOC = os.path.join(CHANNELS_LOC, 'generated') + '/' #LOCKED
IMAGES_LOC = xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'images')) + '/'
PTVL_SKIN_LOC = os.path.join(ADDON_PATH, 'resources', 'skins') #Path to PTVL Skin folder
LOGO_LOC = xbmc.translatePath(REAL_SETTINGS.getSetting('ChannelLogoFolder')) #Channel Logo location
PVR_DOWNLOAD_LOC = xbmc.translatePath(os.path.join(REAL_SETTINGS.getSetting('PVR_Folder'))) #PVR Download location
XMLTV_LOC = xbmc.translatePath(os.path.join(REAL_SETTINGS.getSetting('xmltvLOC'))) + '/'
#BASEURL
USERPASS = REAL_SETTINGS.getSetting('Donor_UP')
BASEURL = 'http://pseudotvlive.com/ptvl/'
PTVLURL = 'http://'+USERPASS+'@pseudotvlive.com/ptvl/'
# Core Default Image Locations
DEFAULT_MEDIA_LOC = xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'skins', 'Default', 'media')) + '/'
DEFAULT_EPGGENRE_LOC = xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'skins', 'Default', 'media', 'epg-genres')) + '/'
DEFAULT_LOGO_LOC = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'logos')) + '/'
#CORE IMG FILENAMES
TIME_BAR = 'pstvTimeBar.png'
BUTTON_FOCUS = 'pstvButtonFocus.png'
BUTTON_NO_FOCUS = 'pstvButtonNoFocus.png'
BUTTON_NO_FOCUS_ALT = 'pstvButtonNoFocusAlt.png'
THUMB = (IMAGES_LOC + 'icon.png')
#Channel Sharing location
if REAL_SETTINGS.getSetting('ChannelSharing') == "true":
CHANNEL_SHARING = True
LOCK_LOC = xbmc.translatePath(os.path.join(REAL_SETTINGS.getSetting('SettingsFolder'), 'cache')) + '/'
XMLTV_CACHE_LOC = xbmc.translatePath(os.path.join(REAL_SETTINGS.getSetting('SettingsFolder'), 'cache', 'xmltv')) + '/'
STRM_CACHE_LOC = xbmc.translatePath(os.path.join(REAL_SETTINGS.getSetting('SettingsFolder'), 'cache', 'strm',''))
EPGGENRE_CACHE_LOC = xbmc.translatePath(os.path.join(REAL_SETTINGS.getSetting('SettingsFolder'), 'cache', 'epg-genres')) + '/' #Post EPG IMG Processing location for future use!
ART_LOC = xbmc.translatePath(os.path.join(REAL_SETTINGS.getSetting('SettingsFolder'), 'cache', 'artwork')) + '/' #Missing Artwork cache location
else:
CHANNEL_SHARING = False
LOCK_LOC = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'cache')) + '/'
XMLTV_CACHE_LOC = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'cache', 'xmltv')) + '/'
STRM_CACHE_LOC = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'cache', 'strm',''))
EPGGENRE_CACHE_LOC = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'cache', 'epg-genres')) + '/' #Post EPG IMG Processing location for future use!
ART_LOC = xbmc.translatePath(os.path.join(SETTINGS_LOC, 'cache', 'artwork')) + '/' #Missing Artwork cache location
#XMLTV FILENAME
PTVLXML = (os.path.join(XMLTV_CACHE_LOC, 'ptvlguide.xml'))
# SKIN SELECT
# Custom skin downloader todo.
if int(REAL_SETTINGS.getSetting('SkinSelector')) == 0:
#Use XBMC's included PTVL skin, else Default.
if os.path.exists(xbmc.translatePath('special://skin/media/script.pseudotv.lite/')):
Skin_Select = 'special://skin/media/'
MEDIA_LOC = xbmc.translatePath(os.path.join(Skin_Select, 'script.pseudotv.lite')) + '/'
EPGGENRE_LOC = xbmc.translatePath(os.path.join(MEDIA_LOC, 'epg-genres')) + '/'
else:
Skin_Select = 'Custom'
MEDIA_LOC = xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'skins', Skin_Select, 'media')) + '/'
EPGGENRE_LOC = xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'skins', Skin_Select, 'media', 'epg-genres')) + '/'
else:
    # Map the remaining selector values to bundled skin folders.
    # (Values outside 0-5 were previously left undefined; here they raise KeyError.)
    _SKIN_NAMES = {1: 'Default', 2: 'PTVL', 3: 'Concast', 4: 'Maverick', 5: 'Z81'}
    Skin_Select = _SKIN_NAMES[int(REAL_SETTINGS.getSetting('SkinSelector'))]
    MEDIA_LOC = xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'skins', Skin_Select, 'media')) + '/'
    EPGGENRE_LOC = xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'skins', Skin_Select, 'media', 'epg-genres')) + '/'
#Double check core image folders
if not xbmcvfs.exists(MEDIA_LOC):
print 'forcing default DEFAULT_MEDIA_LOC'
MEDIA_LOC = DEFAULT_MEDIA_LOC
if not xbmcvfs.exists(EPGGENRE_LOC):
print 'forcing default DEFAULT_EPGGENRE_LOC'
EPGGENRE_LOC = DEFAULT_EPGGENRE_LOC
# Find XBMC Skin path
if xbmcvfs.exists(xbmc.translatePath(os.path.join('special://','skin','720p',''))):
XBMC_SKIN_LOC = xbmc.translatePath(os.path.join('special://','skin','720p',''))
else:
XBMC_SKIN_LOC = xbmc.translatePath(os.path.join('special://','skin','1080i',''))
# Find PTVL selected skin folder 720 or 1080i ?
if xbmcvfs.exists(os.path.join(PTVL_SKIN_LOC, Skin_Select, '720p','')):
PTVL_SKIN_SELECT = xbmc.translatePath(os.path.join(PTVL_SKIN_LOC, Skin_Select, '720p')) + '/'
else:
PTVL_SKIN_SELECT = xbmc.translatePath(os.path.join(PTVL_SKIN_LOC, Skin_Select, '1080i')) + '/'
# PseudoTV Cache Control
if REAL_SETTINGS.getSetting("Cache_Enabled") == 'true': #
Cache_Enabled = True
xbmc.log("script.pseudotv.live-Globals: System Caching Enabled")
else:
Cache_Enabled = False
xbmc.log("script.pseudotv.live-Globals: System Caching Disabled")
# Globals
dlg = xbmcgui.Dialog()
ADDON_SETTINGS = Settings.Settings()
GlobalFileLock = FileLock()
Donor_Downloaded = False
NOTIFY = REAL_SETTINGS.getSetting('notify') == "true"
SILENT = REAL_SETTINGS.getSetting('silent')
DEBUG = REAL_SETTINGS.getSetting('enable_Debug')
SETTOP = REAL_SETTINGS.getSetting("EnableSettop") == "true"
OS_SET = int(REAL_SETTINGS.getSetting("os"))
if REAL_SETTINGS.getSetting('EnableSettop') == 'true':
SETTOP_REFRESH = REFRESH_INT[int(REAL_SETTINGS.getSetting('REFRESH_INT'))]
else:
SETTOP_REFRESH = 72000
if (OS_SET <= 5 or OS_SET == 10 or OS_SET == 12) and REAL_SETTINGS.getSetting("OS_SET_OVERRIDE") != "true":
LOWPOWER = True
else:
LOWPOWER = False
# Common Cache types, Stacked and sorted for read performance... Todo convert to local db, mysql?
#General
quarterly = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "quarterly",6) #System Purge, AutoUpdate
bidaily = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "bidaily",12) #System Purge, AutoUpdate
daily = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "daily",24) #System Purge, AutoUpdate
weekly = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "weekly",24 * 7) #System Purge, AutoUpdate
seasonal = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "seasonal",((24 * 7) * 3)) #System Purge, AutoUpdate
monthly = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "monthly",((24 * 7) * 4)) #System Purge, AutoUpdate
#FileLists
localTV = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "localTV",(((SETTOP_REFRESH / 60) / 60) - 3600))#ForceReset
liveTV = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "liveTV",24) #System Purge, AutoUpdate
YoutubeTV = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "YoutubeTV",48) #System Purge, AutoUpdate
RSSTV = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "RSSTV",48) #System Purge, AutoUpdate
pluginTV = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "pluginTV",72) #System Purge, AutoUpdate
upnpTV = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "playonTV",2) #System Purge, AutoUpdate
lastfm = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "lastfm",48) #System Purge, AutoUpdate
#BCTs
bumpers = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "bumpers",((24 * 7) * 4)) #BCT Purge
ratings = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "ratings",((24 * 7) * 4)) #BCT Purge
commercials = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "commercials",((24 * 7) * 4)) #BCT Purge
trailers = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "trailers",((24 * 7) * 4)) #BCT Purge
#Parsers
parsers = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "parsers",((24 * 7) * 4)) #No Purge (API Queries)
parserFANTV = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "parserFANTV",((24 * 7) * 4)) #No Purge (FANART Queries)
parserTVDB = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "parserTVDB",((24 * 7) * 4)) #No Purge (TVDB Queries)
parserTMDB = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "parserTMDB",((24 * 7) * 4)) #No Purge (TMDB Queries)
#Artwork
artwork = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "artwork",((24 * 7) * 4)) #Artwork Purge
artwork1 = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "artwork1",((24 * 7) * 4)) #Artwork Purge
artwork2 = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "artwork2",((24 * 7) * 4)) #Artwork Purge
artwork3 = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "artwork3",((24 * 7) * 4)) #Artwork Purge
artwork4 = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "artwork4",((24 * 7) * 4)) #Artwork Purge
artwork5 = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "artwork5",((24 * 7) * 4)) #Artwork Purge
artwork6 = StorageServer.StorageServer("plugin://script.pseudotv.live/" + "artwork6",((24 * 7) * 4)) #Artwork Purge
# HEX COLOR OPTIONS 4 (Overlay CHANBUG, EPG Genre & CHtype)
# http://www.w3schools.com/html/html_colornames.asp
COLOR_RED = '#FF0000'
COLOR_GREEN = '#008000'
COLOR_mdGREEN = '#3CB371'
COLOR_BLUE = '#0000FF'
COLOR_ltBLUE = '#ADD8E6'
COLOR_CYAN = '#00FFFF'
COLOR_ltCYAN = '#E0FFFF'
COLOR_PURPLE = '#800080'
COLOR_ltPURPLE = '#9370DB'
COLOR_ORANGE = '#FFA500'
COLOR_YELLOW = '#FFFF00'
COLOR_GRAY = '#808080'
COLOR_ltGRAY = '#D3D3D3'
COLOR_mdGRAY = '#696969'
COLOR_dkGRAY = '#A9A9A9'
COLOR_BLACK = '#000000'
COLOR_WHITE = '#FFFFFF'
COLOR_HOLO = 'FF0297eb'
COLOR_SMOKE = '#F5F5F5'
# EPG Chtype/Genre COLOR TYPES
COLOR_RED_TYPE = ['10', '17', 'TV-MA', 'R', 'NC-17', 'Youtube', 'Sport', 'Sports Event', 'Sports Talk', 'Archery', 'Rodeo', 'Card Games', 'Martial Arts', 'Basketball', 'Baseball', 'Hockey', 'Football', 'Boxing', 'Golf', 'Auto Racing', 'Playoff Sports', 'Hunting', 'Gymnastics', 'Shooting', 'Sports non-event']
COLOR_GREEN_TYPE = ['5', 'News', 'Public Affairs', 'Newsmagazine', 'Politics', 'Entertainment', 'Community', 'Talk', 'Interview', 'Weather']
COLOR_mdGREEN_TYPE = ['9', 'Suspense', 'Horror', 'Horror Suspense', 'Paranormal', 'Thriller', 'Fantasy']
COLOR_BLUE_TYPE = ['Comedy', 'Comedy-Drama', 'Romance-Comedy', 'Sitcom', 'Comedy-Romance']
COLOR_ltBLUE_TYPE = ['2', '4', '14', '15', '16', 'Movie']
COLOR_CYAN_TYPE = ['8', 'Documentary', 'History', 'Biography', 'Educational', 'Animals', 'Nature', 'Health']
COLOR_ltCYAN_TYPE = ['Outdoors', 'Special', 'Reality']
COLOR_PURPLE_TYPE = ['Drama', 'Romance', 'Historical Drama']
COLOR_ltPURPLE_TYPE = ['12', '13', 'LastFM', 'Vevo', 'VevoTV', 'Musical', 'Music', 'Musical Comedy']
COLOR_ORANGE_TYPE = ['11', 'TV-PG', 'TV-14', 'PG', 'PG-13', 'RSS', 'Animation', 'Animated', 'Anime', 'Children', 'Cartoon', 'Family']
COLOR_YELLOW_TYPE = ['1', '3', '6', 'TV-Y7', 'TV-Y', 'TV-G', 'G', 'Action', 'Adventure', 'Action and Adventure', 'Action Adventure', 'Crime', 'Crime Drama', 'Mystery', 'Science Fiction', 'Series', 'Western', 'Soap', 'Variety', 'War', 'Law', 'Adults Only']
COLOR_GRAY_TYPE = ['Auto', 'Collectibles', 'Travel', 'Shopping', 'House Garden', 'Home and Garden', 'Gardening', 'Fitness Health', 'Fitness', 'Home Improvement', 'How-To', 'Cooking', 'Fashion', 'Aviation', 'Dance', 'Auction', 'Art', 'Exercise', 'Parenting']
COLOR_ltGRAY_TYPE = ['0', '7', 'NR', 'Consumer', 'Game Show', 'Other', 'Unknown', 'Religious', 'Anthology', 'None']
# http://developer.android.com/reference/android/graphics/Color.html
# ['COLOR_HOLO', 'COLOR_CYAN', 'COLOR_GREEN', 'COLOR_GRAY', 'COLOR_ltGRAY', 'COLOR_WHITE']
COLOR_CHANNUM = ['0xFF0297eb', '0xC0C0C0C0', '0xff00ff00', '0xff888888', '0xffcccccc', '0xffffffff']
CHANBUG_COLOR = COLOR_CHANNUM[int(REAL_SETTINGS.getSetting('COLOR_CHANNUM'))]
#Actions
#https://github.com/xbmc/xbmc/blob/master/xbmc/guilib/Key.h
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_UP = 3
ACTION_MOVE_DOWN = 4
ACTION_PAGEUP = 5
ACTION_PAGEDOWN = 6
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = (9, 10, 92, 247, 257, 275, 61467, 61448)
##KEY_BUTTON_BACK = 275
##ACTION_NAV_BACK = 92
ACTION_SHOW_INFO = 11
ACTION_PAUSE = 12
ACTION_STOP = 13
ACTION_OSD = 122
ACTION_NUMBER_0 = 58
ACTION_NUMBER_1 = 59
ACTION_NUMBER_2 = 60
ACTION_NUMBER_3 = 61
ACTION_NUMBER_4 = 62
ACTION_NUMBER_5 = 63
ACTION_NUMBER_6 = 64
ACTION_NUMBER_7 = 65
ACTION_NUMBER_8 = 66
ACTION_NUMBER_9 = 67
ACTION_INVALID = 999
ACTION_SHOW_SUBTITLES = 25 #turn subtitles on/off.
ACTION_AUDIO_NEXT_LANGUAGE = 56 #Select next language in movie
ACTION_RECORD = 170 #PVR Backend Record
ACTION_SHOW_CODEC = 27
ACTION_ASPECT_RATIO = 19
ACTION_SHIFT = 118
#unused
ACTION_NEXT_ITEM = 14
ACTION_PREV_ITEM = 15
ACTION_STEP_FOWARD = 17
ACTION_STEP_BACK = 18
ACTION_BIG_STEP_FORWARD = 19
ACTION_BIG_STEP_BACK = 20
ACTION_PLAYER_FORWARD = 73
ACTION_PLAYER_REWIND = 74
ACTION_PLAYER_PLAY = 75
ACTION_PLAYER_PLAYPAUSE = 76
ACTION_TRIGGER_OSD = 243 #show autoclosing OSD. Can b used in videoFullScreen.xml window id=2005
ACTION_SHOW_MPLAYER_OSD = 83 #toggles mplayers OSD. Can be used in videofullscreen.xml window id=2005
ACTION_SHOW_OSD_TIME = 123 #displays current time, can be used in videoFullScreen.xml window id=2005
#ACTION_MENU = 117
ACTION_MENU = 7
ACTION_TELETEXT_RED = 215
ACTION_TELETEXT_GREEN = 216
ACTION_TELETEXT_YELLOW = 217
ACTION_TELETEXT_BLUE = 218
#define ACTION_VOLUME_UP 88
#define ACTION_VOLUME_DOWN 89
#define ACTION_MUTE 91
#define ACTION_VOLAMP_UP 93
#define ACTION_VOLAMP_DOWN 94
#define ACTION_CHANNEL_SWITCH 183 #last channel?
#define ACTION_TOGGLE_WATCHED 200 // Toggle watched status (videos)
#define ACTION_TOGGLE_DIGITAL_ANALOG 202 // switch digital <-> analog
#UTC XMLTV - XMLTV that uses UTC w/ Offset timing (not local time).
UTC_XMLTV = []
#Dynamic Artwork plugins - #Title format must be "Title (Year)" or "Title" or "Title - Episode"
DYNAMIC_PLUGIN_TV = ['plugin.video.simply.player', 'plugin.video.1channel', 'plugin.video.GOtv', 'plugin.video.genesis', 'PlayOn', 'UPNP', 'plugin.video.ororotv', 'plugin.video.F.T.V', 'plugin.video.salts']
DYNAMIC_PLUGIN_MOVIE = ['plugin.video.simply.player', 'plugin.video.1channel', 'plugin.video.iwannawatch', 'plugin.video.viooz.co', 'plugin.video.glowmovies.hd', 'plugin.video.genesis', 'plugin.video.yifymovies.hd', 'plugin.video.GOmovies', 'plugin.video.muchmovies.hd', 'plugin.video.cartoonhd', 'PlayOn', 'UPNP', 'plugin.video.F.T.V', 'plugin.video.salts']
# Plugin seek blacklist - Plugins that are known to use rtmp source which lockup xbmc during seek
BYPASS_SEEK = ['plugin.video.vevo_tv','plugin.video.g4tv','plugin.video.ustvnow', 'plugin.video.mystreamstv.beta']
# Bypass EPG (paused/stacked) by channel name - Removed "(Stacked)" from EPG
BYPASS_EPG = ['PseudoCinema']
# Bypass Overlay Coming up next by channel name - keep "ComingUp Next" from displaying
BYPASS_OVERLAY = ['PseudoCinema'] | gpl-3.0 |
JakeLowey/HackRPI2 | django/contrib/gis/gdal/prototypes/geom.py | 91 | 4762 | from ctypes import c_char_p, c_double, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal, GEOJSON
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import (const_string_output,
double_output, geom_output, int_output, srs_output, string_output, void_output)
### Generation routines specific to this module ###
def env_func(f, argtypes):
"For getting OGREnvelopes."
f.argtypes = argtypes
f.restype = None
f.errcheck = check_envelope
return f
def pnt_func(f):
"For accessing point information."
return double_output(f, [c_void_p, c_int])
def topology_func(f):
f.argtypes = [c_void_p, c_void_p]
f.restype = c_int
    f.errcheck = check_bool
return f
### OGR_G ctypes function prototypes ###
# GeoJSON routines, if supported.
if GEOJSON:
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True)
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True)
else:
from_json = False
to_json = False
to_kml = False
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)])
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True)
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p])
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
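# Illustrative note (added): these prototypes are consumed by
# django.contrib.gis.gdal.OGRGeometry rather than called directly - e.g.
# OGRGeometry('POINT (1 2)') reaches from_wkt above, .wkt uses to_wkt, and
# the ogr_* topology functions back methods such as contains() and
# intersects(). A hedged sketch of that indirect use:
#
#   from django.contrib.gis.gdal import OGRGeometry
#   a = OGRGeometry('POINT (1 2)')
#   b = OGRGeometry('POINT (1 2)')
#   a.equals(b)   # dispatches to ogr_equals defined above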
| mit |
ryanj/origin | vendor/github.com/google/certificate-transparency/python/ct/client/db/sqlite_log_db.py | 35 | 6703 | import logging
import sqlite3
import time
from ct.client.db import log_db
from ct.client.db import database
from ct.proto import client_pb2
class SQLiteLogDB(log_db.LogDB):
def __init__(self, connection_manager):
"""Initialize the database and tables.
Args:
connection_manager: an SQLiteConnectionManager object."""
self.__mgr = connection_manager
with self.__mgr.get_connection() as conn:
# TODO(ekasper): give users control of table names via flags so
# we can explicitly avoid conflicts between database objects
# sharing the same underlying SQLiteConnection.
conn.execute("CREATE TABLE IF NOT EXISTS logs("
"id INTEGER PRIMARY KEY, log_server TEXT UNIQUE, "
"metadata BLOB)")
conn.execute("CREATE TABLE IF NOT EXISTS sths(log_id INTEGER, "
"fetch_timestamp INTEGER,"
"timestamp INTEGER, sth_data BLOB, "
"audit_info BLOB,"
"UNIQUE(log_id, timestamp, sth_data, audit_info) ON "
"CONFLICT IGNORE,"
"FOREIGN KEY(log_id) REFERENCES logs(id))")
conn.execute("CREATE INDEX IF NOT EXISTS sth_by_timestamp on sths("
"log_id, timestamp)")
self.__tables = ["logs", "sths"]
def __repr__(self):
return "%r(db: %r)" % (self.__class__.__name__, self.__mgr)
def __str__(self):
return "%s(db: %s, tables: %s): " % (self.__class__.__name__,
self.__mgr, self.__tables)
def __encode_log_metadata(self, metadata):
log_server = metadata.log_server
local_metadata = client_pb2.CtLogMetadata()
local_metadata.CopyFrom(metadata)
local_metadata.ClearField("log_server")
return log_server, sqlite3.Binary(local_metadata.SerializeToString())
def __decode_log_metadata(self, log_server, serialized_metadata):
metadata = client_pb2.CtLogMetadata()
metadata.ParseFromString(serialized_metadata)
metadata.log_server = log_server
return metadata
def add_log(self, metadata):
log_server, serialized_metadata = self.__encode_log_metadata(
metadata)
with self.__mgr.get_connection() as conn:
try:
conn.execute("INSERT INTO logs(log_server, metadata) "
"VALUES(?, ?)", (log_server, serialized_metadata))
except sqlite3.IntegrityError:
logging.warning("Ignoring duplicate log server %s", log_server)
def update_log(self, metadata):
log_server, serialized_metadata = self.__encode_log_metadata(
metadata)
with self.__mgr.get_connection() as conn:
conn.execute("INSERT OR REPLACE INTO logs(id, log_server, "
"metadata) VALUES((SELECT id FROM logs WHERE "
"log_server = ?), ?, ?) ", (log_server, log_server,
serialized_metadata))
def logs(self):
with self.__mgr.get_connection() as conn:
for log_server, metadata in conn.execute(
"SELECT log_server, metadata FROM logs"):
yield self.__decode_log_metadata(log_server, metadata)
def _get_log_id(self, conn, log_server):
res = conn.execute("SELECT id FROM logs WHERE log_server = ?",
(log_server,))
try:
log_id = res.next()
except StopIteration:
raise database.KeyError("Unknown log server: %s", log_server)
return log_id[0]
def get_log_id(self, log_server):
with self.__mgr.get_connection() as conn:
return self._get_log_id(conn, log_server)
def __encode_sth(self, audited_sth):
timestamp = audited_sth.sth.timestamp
sth = client_pb2.SthResponse()
sth.CopyFrom(audited_sth.sth)
sth.ClearField("timestamp")
audit = client_pb2.AuditInfo()
audit.CopyFrom(audited_sth.audit)
return (timestamp, sqlite3.Binary(sth.SerializeToString()),
sqlite3.Binary(audit.SerializeToString()))
def __decode_sth(self, sth_row):
_, _, timestamp, serialized_sth, serialized_audit = sth_row
audited_sth = client_pb2.AuditedSth()
audited_sth.sth.ParseFromString(serialized_sth)
audited_sth.sth.timestamp = timestamp
audited_sth.audit.ParseFromString(serialized_audit)
return audited_sth
# This ignores a duplicate STH even if the audit data differs.
# TODO(ekasper): add an update method for updating audit data, as needed.
def store_sth(self, log_server, audited_sth):
"""Store the STH in the database.
Will store the STH with a unique ID unless an exact copy already exists.
        Note: fetch_timestamp is the time at which this function is called,
        not the time the STH was actually fetched.
Args:
log_server: the server name, i.e., the <log_server> path prefix
audited_sth: a client_pb2.AuditedSth proto
"""
timestamp, sth_data, audit_info = self.__encode_sth(audited_sth)
with self.__mgr.get_connection() as conn:
log_id = self._get_log_id(conn, log_server)
conn.execute("INSERT INTO sths(log_id, fetch_timestamp, timestamp, "
"sth_data, audit_info) VALUES(?, ?, ?, ?, ?)",
(log_id, int(time.time()), timestamp, sth_data, audit_info))
def get_latest_sth(self, log_server):
row = None
with self.__mgr.get_connection() as conn:
log_id = self._get_log_id(conn, log_server)
res = conn.execute("SELECT * FROM sths WHERE log_id = ? "
"ORDER BY timestamp DESC LIMIT 1", (log_id,))
try:
row = res.next()
except StopIteration:
pass
if row is not None:
return self.__decode_sth(row)
def scan_latest_sth_range(self, log_server, start=0,
end=log_db.LogDB.timestamp_max, limit=0):
sql_limit = -1 if not limit else limit
with self.__mgr.get_connection() as conn:
log_id = self._get_log_id(conn, log_server)
for row in conn.execute(
"SELECT * FROM sths WHERE log_id = ? "
"AND timestamp >= ? AND timestamp <= ? ORDER BY timestamp DESC "
"LIMIT ?", (log_id, start, end, sql_limit)):
yield self.__decode_sth(row)
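# Illustrative usage sketch (not part of the upstream module; the connection
# manager import path is assumed):
#
#     from ct.client.db.sqlite_connection import SQLiteConnectionManager
#
#     db = SQLiteLogDB(SQLiteConnectionManager(":memory:"))
#     metadata = client_pb2.CtLogMetadata()
#     metadata.log_server = "ct.example.com"
#     db.add_log(metadata)
#
#     audited = client_pb2.AuditedSth()
#     audited.sth.timestamp = 1234567890
#     db.store_sth("ct.example.com", audited)
#     latest = db.get_latest_sth("ct.example.com")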
| apache-2.0 |
scivey/mner | external/gmock-1.7.0/gtest/test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
      variable; None if the variable should be unset.
      flag_value: value of the --gtest_throw_on_failure flag;
      None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
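# Illustrative invocation (not part of the upstream script): once the
# gtest_throw_on_failure_test_ binary has been built somewhere
# gtest_test_utils.GetTestExecutablePath() can locate it, the suite runs as a
# plain Python 2 script:
#
#     python gtest_throw_on_failure_test.py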
| mit |
felixjimenez/django | tests/model_inheritance/tests.py | 40 | 13501 | from __future__ import absolute_import, unicode_literals
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase
from django.test.utils import CaptureQueriesContext
from django.utils import six
from .models import (Chef, CommonInfo, ItalianRestaurant, ParkingLot, Place,
Post, Restaurant, Student, StudentWorker, Supplier, Worker, MixinModel)
class ModelInheritanceTests(TestCase):
def test_abstract(self):
# The Student and Worker models both have 'name' and 'age' fields on
# them and inherit the __unicode__() method, just as with normal Python
# subclassing. This is useful if you want to factor out common
# information for programming purposes, but still completely
# independent separate models at the database level.
w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
w2 = Worker.objects.create(name="Barney", age=34, job="Quarry worker")
s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
self.assertEqual(six.text_type(w1), "Worker Fred")
self.assertEqual(six.text_type(s), "Student Pebbles")
# The children inherit the Meta class of their parents (if they don't
# specify their own).
self.assertQuerysetEqual(
Worker.objects.values("name"), [
{"name": "Barney"},
{"name": "Fred"},
],
lambda o: o
)
# Since Student does not subclass CommonInfo's Meta, it has the effect
# of completely overriding it. So ordering by name doesn't take place
# for Students.
self.assertEqual(Student._meta.ordering, [])
# However, the CommonInfo class cannot be used as a normal model (it
# doesn't exist as a model).
self.assertRaises(AttributeError, lambda: CommonInfo.objects.all())
# A StudentWorker which does not exist is both a Student and Worker
# which does not exist.
self.assertRaises(Student.DoesNotExist,
StudentWorker.objects.get, pk=12321321
)
self.assertRaises(Worker.DoesNotExist,
StudentWorker.objects.get, pk=12321321
)
# MultipleObjectsReturned is also inherited.
# This is written out "long form", rather than using __init__/create()
# because of a bug with diamond inheritance (#10808)
sw1 = StudentWorker()
sw1.name = "Wilma"
sw1.age = 35
sw1.save()
sw2 = StudentWorker()
sw2.name = "Betty"
sw2.age = 24
sw2.save()
self.assertRaises(Student.MultipleObjectsReturned,
StudentWorker.objects.get, pk__lt=sw2.pk + 100
)
self.assertRaises(Worker.MultipleObjectsReturned,
StudentWorker.objects.get, pk__lt=sw2.pk + 100
)
def test_multiple_table(self):
post = Post.objects.create(title="Lorem Ipsum")
# The Post model has distinct accessors for the Comment and Link models.
post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
post.attached_link_set.create(
content="The Web framework for perfections with deadlines.",
url="http://www.djangoproject.com/"
)
# The Post model doesn't have an attribute called
# 'attached_%(class)s_set'.
self.assertRaises(AttributeError,
getattr, post, "attached_%(class)s_set"
)
# The Place/Restaurant/ItalianRestaurant models all exist as
# independent models. However, the subclasses also have transparent
# access to the fields of their ancestors.
# Create a couple of Places.
p1 = Place.objects.create(name="Master Shakes", address="666 W. Jersey")
p2 = Place.objects.create(name="Ace Harware", address="1013 N. Ashland")
# Test constructor for Restaurant.
r = Restaurant.objects.create(
name="Demon Dogs",
address="944 W. Fullerton",
serves_hot_dogs=True,
serves_pizza=False,
rating=2
)
# Test the constructor for ItalianRestaurant.
c = Chef.objects.create(name="Albert")
ir = ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
chef=c
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
"Ristorante Miron",
],
attrgetter("name")
)
ir.address = "1234 W. Elm"
ir.save()
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
"Ristorante Miron",
],
attrgetter("name")
)
# Make sure Restaurant and ItalianRestaurant have the right fields in
# the right order.
self.assertEqual(
[f.name for f in Restaurant._meta.fields],
["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef"]
)
self.assertEqual(
[f.name for f in ItalianRestaurant._meta.fields],
["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi"],
)
self.assertEqual(Restaurant._meta.ordering, ["-rating"])
        # Even though p.supplier exists for a Place 'p' (a parent of a Supplier), a
# Restaurant object cannot access that reverse relation, since it's not
# part of the Place-Supplier Hierarchy.
self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), [])
self.assertRaises(FieldError,
Restaurant.objects.filter, supplier__name="foo"
)
# Parent fields can be used directly in filters on the child model.
self.assertQuerysetEqual(
Restaurant.objects.filter(name="Demon Dogs"), [
"Demon Dogs",
],
attrgetter("name")
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
"Ristorante Miron",
],
attrgetter("name")
)
# Filters against the parent model return objects of the parent's type.
p = Place.objects.get(name="Demon Dogs")
self.assertIs(type(p), Place)
# Since the parent and child are linked by an automatically created
# OneToOneField, you can get from the parent to the child by using the
# child's name.
self.assertEqual(
p.restaurant, Restaurant.objects.get(name="Demon Dogs")
)
self.assertEqual(
Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
ItalianRestaurant.objects.get(name="Ristorante Miron")
)
self.assertEqual(
Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
ItalianRestaurant.objects.get(name="Ristorante Miron")
)
# This won't work because the Demon Dogs restaurant is not an Italian
# restaurant.
self.assertRaises(ItalianRestaurant.DoesNotExist,
lambda: p.restaurant.italianrestaurant
)
# An ItalianRestaurant which does not exist is also a Place which does
# not exist.
self.assertRaises(Place.DoesNotExist,
ItalianRestaurant.objects.get, name="The Noodle Void"
)
# MultipleObjectsReturned is also inherited.
self.assertRaises(Place.MultipleObjectsReturned,
Restaurant.objects.get, id__lt=12321
)
# Related objects work just as they normally do.
s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
s1.customers = [r, ir]
s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
s2.customers = [ir]
# This won't work because the Place we select is not a Restaurant (it's
# a Supplier).
p = Place.objects.get(name="Joe's Chickens")
self.assertRaises(Restaurant.DoesNotExist,
lambda: p.restaurant
)
self.assertEqual(p.supplier, s1)
self.assertQuerysetEqual(
ir.provider.order_by("-name"), [
"Luigi's Pasta",
"Joe's Chickens"
],
attrgetter("name")
)
self.assertQuerysetEqual(
Restaurant.objects.filter(provider__name__contains="Chickens"), [
"Ristorante Miron",
"Demon Dogs",
],
attrgetter("name")
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(provider__name__contains="Chickens"), [
"Ristorante Miron",
],
attrgetter("name"),
)
park1 = ParkingLot.objects.create(
name="Main St", address="111 Main St", main_site=s1
)
park2 = ParkingLot.objects.create(
name="Well Lit", address="124 Sesame St", main_site=ir
)
self.assertEqual(
Restaurant.objects.get(lot__name="Well Lit").name,
"Ristorante Miron"
)
# The update() command can update fields in parent and child classes at
# once (although it executed multiple SQL queries to do so).
rows = Restaurant.objects.filter(
serves_hot_dogs=True, name__contains="D"
).update(
name="Demon Puppies", serves_hot_dogs=False
)
self.assertEqual(rows, 1)
r1 = Restaurant.objects.get(pk=r.pk)
self.assertFalse(r1.serves_hot_dogs)
self.assertEqual(r1.name, "Demon Puppies")
# The values() command also works on fields from parent models.
self.assertQuerysetEqual(
ItalianRestaurant.objects.values("name", "rating"), [
{"rating": 4, "name": "Ristorante Miron"}
],
lambda o: o
)
# select_related works with fields from the parent object as if they
# were a normal part of the model.
self.assertNumQueries(2,
lambda: ItalianRestaurant.objects.all()[0].chef
)
self.assertNumQueries(1,
lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
)
def test_select_related_defer(self):
"""
#23370 - Should be able to defer child fields when using
select_related() from parent to child.
"""
Restaurant.objects.create(
name="Demon Dogs",
address="944 W. Fullerton",
serves_hot_dogs=True,
serves_pizza=False,
rating=2,
)
ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
)
qs = (Restaurant.objects
.select_related("italianrestaurant")
.defer("italianrestaurant__serves_gnocchi")
.order_by("rating"))
        # Test that the field was actually deferred
with self.assertNumQueries(2):
objs = list(qs.all())
self.assertTrue(objs[1].italianrestaurant.serves_gnocchi)
        # Test that model fields were assigned correct values
self.assertEqual(qs[0].name, 'Demon Dogs')
self.assertEqual(qs[0].rating, 2)
self.assertEqual(qs[1].italianrestaurant.name, 'Ristorante Miron')
self.assertEqual(qs[1].italianrestaurant.rating, 4)
def test_mixin_init(self):
m = MixinModel()
self.assertEqual(m.other_attr, 1)
def test_update_query_counts(self):
"""
Test that update queries do not generate non-necessary queries.
Refs #18304.
"""
c = Chef.objects.create(name="Albert")
ir = ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
chef=c
)
with self.assertNumQueries(3):
ir.save()
def test_update_parent_filtering(self):
"""
Test that updating a field of a model subclass doesn't issue an UPDATE
query constrained by an inner query.
Refs #10399
"""
supplier = Supplier.objects.create(
name='Central market',
address='610 some street'
)
# Capture the expected query in a database agnostic way
with CaptureQueriesContext(connection) as captured_queries:
Place.objects.filter(pk=supplier.pk).update(name=supplier.name)
expected_sql = captured_queries[0]['sql']
# Capture the queries executed when a subclassed model instance is saved.
with CaptureQueriesContext(connection) as captured_queries:
supplier.save(update_fields=('name',))
for query in captured_queries:
sql = query['sql']
if 'UPDATE' in sql:
self.assertEqual(expected_sql, sql)
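# Illustrative sketch (not part of the test suite): the models imported above
# are assumed to look roughly like this. CommonInfo is an abstract base, while
# Place -> Restaurant -> ItalianRestaurant use multi-table inheritance, which
# creates the implicit place_ptr/restaurant_ptr OneToOneFields exercised in
# test_multiple_table():
#
#     class CommonInfo(models.Model):
#         name = models.CharField(max_length=50)
#         age = models.PositiveIntegerField()
#
#         class Meta:
#             abstract = True
#             ordering = ["name"]
#
#     class Worker(CommonInfo):
#         job = models.CharField(max_length=50)
#
#     class Place(models.Model):
#         name = models.CharField(max_length=50)
#         address = models.CharField(max_length=80)
#
#     class Restaurant(Place):
#         serves_hot_dogs = models.BooleanField(default=False)
#         serves_pizza = models.BooleanField(default=False)
#
#     class ItalianRestaurant(Restaurant):
#         serves_gnocchi = models.BooleanField(default=False)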
| bsd-3-clause |
numerigraphe/odoomrp-wip | mrp_product_variants/models/product.py | 19 | 1629 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields
class ProductAttribute(models.Model):
_inherit = 'product.attribute'
parent_inherited = fields.Boolean('Inherits from parent')
class ProductTemplate(models.Model):
_inherit = 'product.template'
def _get_product_attributes_inherit_dict(self, product_attribute_list):
product_attributes = self._get_product_attributes_dict()
for attr in product_attributes:
if self.env['product.attribute'].browse(
attr['attribute']).parent_inherited:
for attr_line in product_attribute_list:
if attr_line.attribute.id == attr['attribute']:
attr.update({'value': attr_line.value.id})
return product_attributes
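# Illustrative sketch (not part of the module; _get_product_attributes_dict is
# assumed to be defined on product.template elsewhere in mrp_product_variants):
# given a template and the attribute lines of a parent product, only the
# attributes flagged parent_inherited receive the parent's value:
#
#     attrs = template._get_product_attributes_inherit_dict(
#         parent_product.product_attributes)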
| agpl-3.0 |
mapsme/omim | tools/python/maps_generator/checks/logs/logs_reader.py | 4 | 7172 | import datetime
import logging
import os
import re
from collections import Counter
from collections import namedtuple
from enum import Enum
from pathlib import Path
from typing import List
from typing import Tuple
from typing import Union
import maps_generator.generator.env as env
from maps_generator.generator.stages import get_stage_type
from maps_generator.utils.algo import parse_timedelta
logger = logging.getLogger(__name__)
FLAGS = re.MULTILINE | re.DOTALL
GEN_LINE_PATTERN = re.compile(
r"^LOG\s+TID\((?P<tid>\d+)\)\s+(?P<level>[A-Z]+)\s+"
r"(?P<timestamp>[-.e0-9]+)\s+(?P<message>.+)$",
FLAGS,
)
GEN_LINE_CHECK_PATTERN = re.compile(
r"^TID\((?P<tid>\d+)\)\s+" r"ASSERT FAILED\s+(?P<message>.+)$", FLAGS
)
MAPS_GEN_LINE_PATTERN = re.compile(
r"^\[(?P<time_string>[0-9-:, ]+)\]\s+(?P<level>\w+)\s+"
r"(?P<module>\w+)\s+(?P<message>.+)$",
FLAGS,
)
STAGE_START_MSG_PATTERN = re.compile(r"^Stage (?P<name>\w+): start ...$")
STAGE_FINISH_MSG_PATTERN = re.compile(
r"^Stage (?P<name>\w+): finished in (?P<duration_string>.+)$"
)
LogLine = namedtuple("LogLine", ["timestamp", "level", "tid", "message", "type"])
LogStage = namedtuple("LogStage", ["name", "duration", "lines"])
class LogType(Enum):
gen = 1
maps_gen = 2
class Log:
def __init__(self, path: str):
self.path = Path(path)
self.name = self.path.stem
self.is_stage_log = False
self.is_mwm_log = False
try:
get_stage_type(self.name)
self.is_stage_log = True
except AttributeError:
if self.name in env.COUNTRIES_NAMES or self.name in env.WORLDS_NAMES:
self.is_mwm_log = True
self.lines = self._parse_lines()
def _parse_lines(self) -> List[LogLine]:
logline = ""
state = None
lines = []
base_timestamp = 0.0
def try_parse_and_insert():
nonlocal logline
logline = logline.strip()
if not logline:
return
nonlocal base_timestamp
line = None
if state == LogType.gen:
line = Log._parse_gen_line(logline, base_timestamp)
elif state == LogType.maps_gen:
line = Log._parse_maps_gen_line(logline)
base_timestamp = line.timestamp
if line is not None:
lines.append(line)
else:
logger.warn(f"{self.name}: line was not parsed: {logline}")
logline = ""
with self.path.open() as logfile:
for line in logfile:
if line.startswith("LOG") or line.startswith("TID"):
try_parse_and_insert()
state = LogType.gen
elif line.startswith("["):
try_parse_and_insert()
state = LogType.maps_gen
logline += line
try_parse_and_insert()
return lines
@staticmethod
def _parse_gen_line(line: str, base_time: float = 0.0) -> LogLine:
m = GEN_LINE_PATTERN.match(line)
if m:
return LogLine(
timestamp=base_time + float(m["timestamp"]),
level=logging.getLevelName(m["level"]),
tid=int(m["tid"]),
message=m["message"],
type=LogType.gen,
)
m = GEN_LINE_CHECK_PATTERN.match(line)
if m:
return LogLine(
timestamp=None,
level=logging.getLevelName("CRITICAL"),
tid=None,
message=m["message"],
type=LogType.gen,
)
assert False, line
@staticmethod
def _parse_maps_gen_line(line: str) -> LogLine:
        m = MAPS_GEN_LINE_PATTERN.match(line)
        if m:
            # Parse the timestamp only on a successful match; subscripting a
            # failed match (None) would raise TypeError before ever reaching
            # the assert below.
            time_string = m["time_string"].split(",")[0]
            timestamp = datetime.datetime.strptime(
                time_string, logging.Formatter.default_time_format
            ).timestamp()
            return LogLine(
                timestamp=float(timestamp),
                level=logging.getLevelName(m["level"]),
                tid=None,
                message=m["message"],
                type=LogType.maps_gen,
            )
        assert False, line
class LogsReader:
def __init__(self, path: str):
self.path = os.path.abspath(os.path.expanduser(path))
def __iter__(self):
for filename in os.listdir(self.path):
if filename.endswith(".log"):
yield Log(os.path.join(self.path, filename))
def split_into_stages(log: Log) -> List[LogStage]:
log_stages = []
name = None
lines = []
for line in log.lines:
if line.message.startswith("Stage"):
m = STAGE_START_MSG_PATTERN.match(line.message)
if m:
if name is not None:
logger.warn(f"{log.name}: stage {name} has not finish line.")
log_stages.append(LogStage(name=name, duration=None, lines=lines))
name = m["name"]
m = STAGE_FINISH_MSG_PATTERN.match(line.message)
if m:
assert name == m["name"], line
duration = parse_timedelta(m["duration_string"])
log_stages.append(LogStage(name=name, duration=duration, lines=lines))
name = None
lines = []
else:
lines.append(line)
if name is not None:
logger.warn(f"{log.name}: stage {name} has not finish line.")
log_stages.append(LogStage(name=name, duration=None, lines=lines))
return log_stages
def _is_worse(lhs: LogStage, rhs: LogStage) -> bool:
if (lhs.duration is None) ^ (rhs.duration is None):
return lhs.duration is None
if len(rhs.lines) > len(lhs.lines):
return True
    if lhs.duration is None:
        # Both durations are missing; comparing two None values would raise.
        return False
    return rhs.duration > lhs.duration
def normalize_logs(llogs: List[LogStage]) -> List[LogStage]:
normalized_logs = []
buckets = {}
for log in llogs:
if log.name in buckets:
if _is_worse(normalized_logs[buckets[log.name]], log):
normalized_logs[buckets[log.name]] = log
else:
normalized_logs.append(log)
buckets[log.name] = len(normalized_logs) - 1
return normalized_logs
def count_levels(logs: Union[List[LogLine], LogStage]) -> Counter:
if isinstance(logs, list):
return Counter(log.level for log in logs)
if isinstance(logs, LogStage):
return count_levels(logs.lines)
assert False, f"Type {type(logs)} is unsupported."
def find_and_parse(
logs: Union[List[LogLine], LogStage], pattern: Union[str, type(re.compile(""))],
) -> List[Tuple[dict, str]]:
if isinstance(pattern, str):
pattern = re.compile(pattern, FLAGS)
if isinstance(logs, list):
found = []
for log in logs:
m = pattern.match(log.message)
if m:
found.append((m.groupdict(), log))
return found
if isinstance(logs, LogStage):
return find_and_parse(logs.lines, pattern)
assert False, f"Type {type(logs)} is unsupported."
| apache-2.0 |
jcpowermac/ansible | lib/ansible/module_utils/network/vyos/vyos.py | 16 | 5304 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection
_DEVICE_CONFIGS = {}
vyos_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
}
vyos_argument_spec = {
'provider': dict(type='dict', options=vyos_provider_spec),
}
vyos_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'timeout': dict(removed_in_version=2.9, type='int'),
}
vyos_argument_spec.update(vyos_top_spec)
def get_provider_argspec():
return vyos_provider_spec
def get_connection(module):
if hasattr(module, '_vyos_connection'):
return module._vyos_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._vyos_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._vyos_connection
def get_capabilities(module):
if hasattr(module, '_vyos_capabilities'):
return module._vyos_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._vyos_capabilities = json.loads(capabilities)
return module._vyos_capabilities
def get_config(module):
global _DEVICE_CONFIGS
if _DEVICE_CONFIGS != {}:
return _DEVICE_CONFIGS
else:
connection = get_connection(module)
out = connection.get_config()
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS = cfg
return cfg
def run_commands(module, commands, check_rc=True):
responses = list()
connection = get_connection(module)
for cmd in to_list(commands):
try:
cmd = json.loads(cmd)
command = cmd['command']
prompt = cmd['prompt']
answer = cmd['answer']
        except Exception:
command = cmd
prompt = None
answer = None
out = connection.get(command, prompt, answer)
try:
out = to_text(out, errors='surrogate_or_strict')
except UnicodeError:
module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
responses.append(out)
return responses
def load_config(module, commands, commit=False, comment=None):
connection = get_connection(module)
out = connection.edit_config(commands)
diff = None
if module._diff:
out = connection.get('compare')
out = to_text(out, errors='surrogate_or_strict')
if not out.startswith('No changes'):
out = connection.get('show')
diff = to_text(out, errors='surrogate_or_strict').strip()
if commit:
try:
out = connection.commit(comment)
        except Exception:
connection.discard_changes()
module.fail_json(msg='commit failed: %s' % out)
if not commit:
connection.discard_changes()
else:
connection.get('exit')
if diff:
return diff
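# Illustrative sketch (not part of the module): a minimal Ansible module built
# on these helpers; everything except vyos_argument_spec and run_commands is a
# placeholder:
#
#     from ansible.module_utils.basic import AnsibleModule
#
#     argument_spec = dict(commands=dict(type='list', required=True))
#     argument_spec.update(vyos_argument_spec)
#     module = AnsibleModule(argument_spec=argument_spec)
#     responses = run_commands(module, module.params['commands'])
#     module.exit_json(changed=False, stdout=responses)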
| gpl-3.0 |
web30s/odoo-9.0c-20160402 | hello/templates/openerp/addons/website_sale_options/models/sale_order.py | 39 | 3118 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.tools.translate import _
class sale_order_line(osv.Model):
_inherit = "sale.order.line"
_columns = {
'linked_line_id': fields.many2one('sale.order.line', 'Linked Order Line', domain="[('order_id','!=',order_id)]", ondelete='cascade'),
'option_line_ids': fields.one2many('sale.order.line', 'linked_line_id', string='Options Linked'),
}
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
line_ids = super(sale_order, self)._cart_find_product_line(cr, uid, ids, product_id, line_id, context=context)
if line_id:
return line_ids
linked_line_id = kwargs.get('linked_line_id')
optional_product_ids = kwargs.get('optional_product_ids')
for so in self.browse(cr, uid, ids, context=context):
domain = [('id', 'in', line_ids)]
domain += linked_line_id and [('linked_line_id', '=', linked_line_id)] or [('linked_line_id', '=', False)]
if optional_product_ids:
domain += [('option_line_ids.product_id', '=', pid) for pid in optional_product_ids]
else:
domain += [('option_line_ids', '=', False)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
value = super(sale_order, self)._cart_update(cr, uid, ids, product_id, line_id, add_qty, set_qty, context=context, **kwargs)
sol = self.pool.get('sale.order.line')
line = sol.browse(cr, SUPERUSER_ID, value.get('line_id'), context=context)
# link a product to the sale order
if kwargs.get('linked_line_id'):
linked_line_id = sol.browse(cr, SUPERUSER_ID, kwargs['linked_line_id'], context=context)
line.write({
"name": _("%s\nOption for: %s") % (line.name, linked_line_id.product_id.name_get()[0][1]),
"linked_line_id": linked_line_id.id
})
value['option_ids'] = set()
for so in self.browse(cr, uid, ids, context=context):
# select all optional products linked to the updated line
option_line_ids = [l for l in so.order_line if l.linked_line_id.id == line.id]
# update line
for option_line_id in option_line_ids:
super(sale_order, self)._cart_update(cr, uid, ids, option_line_id.product_id.id, option_line_id.id, add_qty, set_qty, context=context, **kwargs)
option_line_id.write({"name": _("%s\nOption for: %s") % (option_line_id.name, option_line_id.linked_line_id.product_id.name_get()[0][1])})
value['option_ids'].add(option_line_id.id)
value['option_ids'] = list(value['option_ids'])
return value
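# Illustrative sketch (not part of the module): from a website_sale
# controller, an optional product is attached to an existing cart line by
# passing linked_line_id; names and values are placeholders:
#
#     order = request.website.sale_get_order(force_create=True)
#     value = order._cart_update(product_id=product.id, add_qty=1,
#                                linked_line_id=main_line.id)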
| gpl-3.0 |
ktdreyer/teuthology | scripts/report.py | 13 | 1704 | import docopt
import teuthology.report
doc = """
usage:
teuthology-report -h
teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] [-D] -r RUN ...
teuthology-report [-v] [-s SERVER] [-a ARCHIVE] [-D] -r RUN -j JOB ...
teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] --all-runs
Submit test results to a web service
optional arguments:
-h, --help show this help message and exit
-a ARCHIVE, --archive ARCHIVE
The base archive directory
[default: {archive_base}]
-r [RUN ...], --run [RUN ...]
A run (or list of runs) to submit
-j [JOB ...], --job [JOB ...]
A job (or list of jobs) to submit
--all-runs Submit all runs in the archive
-R, --refresh Re-push any runs already stored on the server. Note
that this may be slow.
-s SERVER, --server SERVER
"The server to post results to, e.g.
http://localhost:8080/ . May also be specified in
~/.teuthology.yaml as 'results_server'
-n, --no-save By default, when submitting all runs, we remember the
last successful submission in a file called
'last_successful_run'. Pass this flag to disable that
behavior.
-D, --dead Mark all given jobs (or entire runs) with status
'dead'. Implies --refresh.
-v, --verbose be more verbose
""".format(archive_base=teuthology.config.config.archive_base)
def main():
args = docopt.docopt(doc)
teuthology.report.main(args)
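# Example invocations (illustrative; run names and server URL are
# placeholders):
#
#     teuthology-report -r my-run-2016-01-01
#     teuthology-report -v -r my-run-2016-01-01 -j 12345 12346
#     teuthology-report --all-runs -s http://localhost:8080/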
| mit |
Michagogo/bitcoin | qa/rpc-tests/abandonconflict.py | 53 | 7752 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import urllib.parse
class AbandonConflictTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
url = urllib.parse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
# Note had to make sure tx did not have AllowFree priority
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
# Verify txs no longer in mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
print(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| mit |
TomoakiNagahara/wakatime-unity | Editor/WakaTime/client/wakatime/packages/requests/models.py | 4 | 28443 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata,
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
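# Illustrative note (not part of requests): _encode_params preserves the
# ordering of 2-tuples and expands iterable values, e.g.
#
#     RequestEncodingMixin._encode_params([('k', ['v1', 'v2']), ('x', 'y')])
#     # -> 'k=v1&k=v2&x=y'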
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
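# Illustrative sketch (not part of requests): a Request is normally prepared
# and sent through a Session, which merges in session-level cookies, headers
# and auth; the URL is a placeholder:
#
#     import requests
#     s = requests.Session()
#     req = requests.Request('POST', 'http://httpbin.org/post', data={'k': 'v'})
#     prepped = s.prepare_request(req)
#     resp = s.send(prepped)
#     resp.raise_for_status()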
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data and json is None:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
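    # Illustrative sketch only: with ``stream=True`` the body is drawn from
    # the socket in ``chunk_size`` pieces instead of being buffered whole.
    # The URL and filename are placeholders.
    #
    #     r = requests.get('http://example.com/big.bin', stream=True)
    #     with open('big.bin', 'wb') as fh:
    #         for chunk in r.iter_content(chunk_size=8192):
    #             fh.write(chunk)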
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
        .. note:: This method is not reentrant-safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
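    # Illustrative sketch only: a trailing partial line is held in ``pending``
    # until the next chunk completes it, so lines that straddle chunk
    # boundaries are reassembled. ``handle_line`` is a hypothetical callback.
    #
    #     r = requests.get('http://example.com/stream', stream=True)
    #     for line in r.iter_lines(chunk_size=512):
    #         if line:
    #             handle_line(line)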
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
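    # Illustrative sketch only: a body that is not valid JSON makes
    # json.loads() raise ValueError, so callers commonly guard the call.
    #
    #     try:
    #         payload = r.json()
    #     except ValueError:
    #         payload = None   # body was empty or not JSON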
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
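    # Illustrative sketch only: for a paginated API that sends a Link header,
    # each relation is reachable by its ``rel`` value ('next' is an example).
    #
    #     next_url = r.links.get('next', {}).get('url')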
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
| cc0-1.0 |
thinkasoft/ProyectoRD-dev | l10n_ve_sale_purchase/model/stock.py | 1 | 2883 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
############################ Credits ########################################
#    Coded by: Humberto Arocha <humberto@openerp.com.ve>
#              Maria Gabriela Quilarque <gabriela@vauxoo.com>
#              Javier Duran <javier@vauxoo.com>
#    Planned by: Nhomar Hernandez
#    Financed by: Helados Gilda, C.A. http://heladosgilda.com.ve
#    Audited by: Humberto Arocha humberto@openerp.com.ve
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
from openerp.tools import config
import time
class stock_picking(osv.osv):
_inherit = 'stock.picking'
    def action_invoice_create(self, cursor, user, ids, journal_id=False, group=False, type='out_invoice', context=None):
        """ Add the retention concept to the invoice lines created from a
        purchase order or sales order invoiced from the picking list.
        """
if context is None:
context = {}
data = super(stock_picking, self).action_invoice_create(cursor, user, ids, journal_id, group, type, context)
        picking_id = data.keys()[0]
        invoice_id = data[picking_id]
        invoice_brw = self.pool.get('account.invoice').browse(cursor, user, invoice_id)
        picking_brw = self.browse(cursor, user, picking_id)
invoice_line_obj = self.pool.get('account.invoice.line')
for l in invoice_brw.invoice_line:
invoice_line_obj.write(cursor, user, l.id, {'concept_id':
l.product_id and l.product_id.concept_id and l.product_id.concept_id.id or False})
return data
_columns = {
'nro_ctrl': fields.char('Invoice ref.', size=32, readonly=True, states={'draft':[('readonly',False)]}, help="Invoice reference"),
}
| agpl-3.0 |
cfriedt/gnuradio | gr-blocks/python/blocks/qa_tag_share.py | 12 | 2402 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import pmt
class qa_tag_share(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_t(self):
# Constants
tag_key = 'in1_tag'
tag_value = 0
tag_offset = 0
in0_value = 1.0+1.0j
in1_value = 2.717
in0_data = (in0_value,)*10
in1_data = (in1_value,)*10
sink_data = in0_data
tag = gr.tag_t()
tag.key = pmt.to_pmt(tag_key)
tag.value = pmt.to_pmt(tag_value)
tag.offset = tag_offset
# Only tag Input 1 of the share block and see if it transfers
# to Output 0. Also verify that Input 0 stream is propagated to
# Output 0.
in0 = blocks.vector_source_c(in0_data, False, 1)
in1 = blocks.vector_source_f(in1_data, False, 1, (tag,))
tag_share = blocks.tag_share(gr.sizeof_gr_complex, gr.sizeof_float)
sink = blocks.vector_sink_c(1)
self.tb.connect(in0, (tag_share,0))
self.tb.connect(in1, (tag_share,1))
self.tb.connect(tag_share, sink)
self.tb.run()
self.assertEqual(len(sink.tags()), 1)
self.assertEqual(pmt.to_python(sink.tags()[0].key), tag_key)
self.assertEqual(pmt.to_python(sink.tags()[0].value), tag_value)
self.assertEqual(sink.tags()[0].offset, tag_offset)
self.assertEqual(sink.data(), sink_data)
if __name__ == '__main__':
gr_unittest.run(qa_tag_share, 'qa_tag_share.xml')
| gpl-3.0 |
kou/zulip | analytics/management/commands/stream_stats.py | 3 | 2358 | from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from zerver.models import Message, Realm, Recipient, Stream, Subscription, get_realm
class Command(BaseCommand):
help = "Generate statistics on the streams for a realm."
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('realms', metavar='<realm>', nargs='*',
help="realm to generate statistics for")
def handle(self, *args: Any, **options: str) -> None:
if options['realms']:
try:
realms = [get_realm(string_id) for string_id in options['realms']]
except Realm.DoesNotExist as e:
raise CommandError(e)
else:
realms = Realm.objects.all()
for realm in realms:
streams = Stream.objects.filter(realm=realm).exclude(Q(name__istartswith="tutorial-"))
# private stream count
private_count = 0
# public stream count
public_count = 0
for stream in streams:
if stream.invite_only:
private_count += 1
else:
public_count += 1
print("------------")
print(realm.string_id, end=' ')
print("{:>10} {} public streams and".format("(", public_count), end=' ')
print(f"{private_count} private streams )")
print("------------")
print("{:>25} {:>15} {:>10} {:>12}".format("stream", "subscribers", "messages", "type"))
for stream in streams:
if stream.invite_only:
stream_type = 'private'
else:
stream_type = 'public'
print(f"{stream.name:>25}", end=' ')
recipient = Recipient.objects.filter(type=Recipient.STREAM, type_id=stream.id)
print("{:10}".format(len(Subscription.objects.filter(recipient=recipient,
active=True))), end=' ')
num_messages = len(Message.objects.filter(recipient=recipient))
print(f"{num_messages:12}", end=' ')
print(f"{stream_type:>15}")
print("")
| apache-2.0 |
jobiols/server-tools | mass_editing/wizard/mass_editing_wizard.py | 6 | 13417 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C):
# 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import orm
import openerp.tools as tools
from openerp.tools.translate import _
from lxml import etree
class MassEditingWizard(orm.TransientModel):
_name = 'mass.editing.wizard'
def fields_view_get(
self, cr, uid, view_id=None, view_type='form', context=None,
toolbar=False, submenu=False):
s_set = _("Set")
s_add = _("Add")
s_remove = _("Remove")
result = super(MassEditingWizard, self).fields_view_get(
cr, uid, view_id, view_type, context, toolbar, submenu)
if context.get('mass_editing_object'):
mass_object = self.pool['mass.object']
editing_data = mass_object.browse(
cr, uid, context.get('mass_editing_object'), context)
all_fields = {}
xml_form = etree.Element('form', {
'string': tools.ustr(editing_data.name), 'version': '7.0'})
xml_group = etree.SubElement(xml_form, 'group', {'colspan': '4'})
etree.SubElement(xml_group, 'label', {
'string': '', 'colspan': '2'})
xml_group = etree.SubElement(xml_form, 'group', {'colspan': '4',
'col': '4'})
model_obj = self.pool[context.get('active_model')]
field_info = model_obj.fields_get(cr, uid, [], context)
for field in editing_data.field_ids:
if field.ttype == "many2many":
all_fields[field.name] = field_info[field.name]
all_fields["selection__" + field.name] = {
'type': 'selection',
'string': field_info[field.name]['string'],
'selection': [
('add', s_add), ('set', s_set),
('remove_m2m', s_remove)]}
xml_group = etree.SubElement(xml_group, 'group', {
'colspan': '4'})
etree.SubElement(xml_group, 'separator', {
'string': field_info[field.name]['string'],
'colspan': '2'})
etree.SubElement(xml_group, 'field', {
'name': "selection__" + field.name,
'colspan': '2', 'nolabel': '1'})
etree.SubElement(xml_group, 'field', {
'name': field.name, 'colspan': '4', 'nolabel': '1',
'attrs': (
"{'invisible':[('selection__" +
field.name + "','=','remove_m2m')]}")})
elif field.ttype == "one2many":
all_fields["selection__" + field.name] = {
'type': 'selection',
'string': field_info[field.name]['string'],
'selection': [('set', s_set), ('remove', s_remove)]}
all_fields[field.name] = {
'type': field.ttype, 'string': field.field_description,
'relation': field.relation}
etree.SubElement(xml_group, 'field', {
'name': "selection__" + field.name, 'colspan': '2'})
etree.SubElement(xml_group, 'field', {
'name': field.name, 'colspan': '4', 'nolabel': '1',
'attrs': (
"{'invisible':[('selection__" +
field.name + "','=','remove_o2m')]}")})
elif field.ttype == "many2one":
all_fields["selection__" + field.name] = {
'type': 'selection',
'string': field_info[field.name]['string'],
'selection': [('set', s_set), ('remove', s_remove)]}
all_fields[field.name] = {
'type': field.ttype, 'string': field.field_description,
'relation': field.relation}
etree.SubElement(xml_group, 'field', {
'name': "selection__" + field.name, 'colspan': '2'})
etree.SubElement(xml_group, 'field', {
'name': field.name, 'nolabel': '1', 'colspan': '2',
'attrs': (
"{'invisible':[('selection__" +
field.name + "','=','remove')]}")})
elif field.ttype == "char":
all_fields["selection__" + field.name] = {
'type': 'selection',
'string': field_info[field.name]['string'],
'selection': [('set', s_set), ('remove', s_remove)]}
all_fields[field.name] = {
'type': field.ttype, 'string': field.field_description,
'size': field.size or 256}
etree.SubElement(xml_group, 'field', {
'name': "selection__" + field.name,
'colspan': '2',
})
etree.SubElement(xml_group, 'field', {
'name': field.name, 'nolabel': '1',
'attrs': (
"{'invisible':[('selection__" +
field.name + "','=','remove')]}"),
'colspan': '2'})
elif field.ttype == 'selection':
all_fields["selection__" + field.name] = {
'type': 'selection',
'string': field_info[field.name]['string'],
'selection': [('set', s_set), ('remove', s_remove)]}
etree.SubElement(xml_group, 'field', {
'name': "selection__" + field.name, 'colspan': '2'})
etree.SubElement(xml_group, 'field', {
'name': field.name, 'nolabel': '1', 'colspan': '2',
'attrs': (
"{'invisible':[('selection__" +
field.name + "','=','remove')]}")})
all_fields[field.name] = {
'type': field.ttype,
'string': field.field_description,
'selection': field_info[field.name]['selection']}
else:
all_fields[field.name] = {
'type': field.ttype, 'string': field.field_description}
all_fields["selection__" + field.name] = {
'type': 'selection',
'string': field_info[field.name]['string'],
'selection': [('set', s_set), ('remove', s_remove)]}
if field.ttype == 'text':
xml_group = etree.SubElement(xml_group, 'group', {
'colspan': '6'})
etree.SubElement(xml_group, 'separator', {
'string': all_fields[field.name]['string'],
'colspan': '2'})
etree.SubElement(xml_group, 'field', {
'name': "selection__" + field.name,
'colspan': '2', 'nolabel': '1'})
etree.SubElement(xml_group, 'field', {
'name': field.name, 'colspan': '4', 'nolabel': '1',
'attrs': (
"{'invisible':[('selection__" +
field.name + "','=','remove')]}")})
else:
all_fields["selection__" + field.name] = {
'type': 'selection',
'string': field_info[field.name]['string'],
'selection': [(
'set', s_set), ('remove', s_remove)]}
etree.SubElement(xml_group, 'field', {
'name': "selection__" + field.name,
'colspan': '2', })
etree.SubElement(xml_group, 'field', {
'name': field.name, 'nolabel': '1',
'attrs': (
"{'invisible':[('selection__" +
field.name + "','=','remove')]}"),
'colspan': '2', })
etree.SubElement(
xml_form, 'separator', {'string': '', 'colspan': '4'})
xml_group3 = etree.SubElement(xml_form, 'footer', {})
s_apply = _("Apply")
etree.SubElement(xml_group3, 'button', {
'string': s_apply, 'icon': "gtk-execute",
'type': 'object', 'name': "action_apply",
'class': "oe_highlight"})
s_close = _("Close")
etree.SubElement(xml_group3, 'button', {
'string': s_close, 'icon': "gtk-close", 'special': 'cancel'})
root = xml_form.getroottree()
result['arch'] = etree.tostring(root)
result['fields'] = all_fields
return result
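    # Summary of the generated view (descriptive note, not upstream code):
    # every chosen field gets a companion 'selection__<field>' selector
    # (Set/Add/Remove) next to the field widget itself, e.g. a char field
    # 'ref' yields the pair 'selection__ref' + 'ref'; create() below then
    # interprets each pair to build the values for the mass write().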
def read(self, cr, uid, ids, fields, context=None):
""" Without this call, dynamic fields defined in fields_view_get()
generate a log warning, i.e.:
openerp.models: mass.editing.wizard.read()
with unknown field 'myfield'
openerp.models: mass.editing.wizard.read()
with unknown field 'selection__myfield'
"""
# We remove fields which are not in _columns
real_fields = [x for x in fields if x in self._columns]
return super(MassEditingWizard, self).read(
cr, uid, ids, real_fields, context=context)
    def create(self, cr, uid, vals, context=None):
        if context is None:
            context = {}
        if context.get('active_model') and context.get('active_ids'):
model_obj = self.pool.get(context.get('active_model'))
model_field_obj = self.pool.get('ir.model.fields')
translation_obj = self.pool.get('ir.translation')
            write_vals = {}
            for key, val in vals.items():
                if key.startswith('selection__'):
                    split_key = key.split('__', 1)[1]
                    if val == 'set':
                        write_vals.update({split_key: vals.get(split_key, False)})
                    elif val == 'remove':
                        write_vals.update({split_key: False})
                        # If the field to remove is translatable,
                        # its translations have to be removed
                        model_field_id = model_field_obj.search(cr, uid, [
                            ('model', '=', context.get('active_model')),
                            ('name', '=', split_key)
                        ])
                        if model_field_id and model_field_obj.browse(
                                cr, uid, model_field_id,
                                context=context).translate:
                            translation_ids = translation_obj.search(cr, uid, [
                                ('res_id', 'in', context.get('active_ids')),
                                ('type', '=', 'model'),
                                ('name', '=', u"{0},{1}".format(
                                    context.get('active_model'), split_key))])
                            translation_obj.unlink(cr, uid, translation_ids,
                                                   context=context)
                    elif val == 'remove_m2m':
                        write_vals.update({split_key: [
                            (3, m2m_id) for m2m_id in vals.get(
                                split_key, False)[0][2]]})
                    elif val == 'add':
                        m2m_list = []
                        for m2m_id in vals.get(split_key, False)[0][2]:
                            m2m_list.append((4, m2m_id))
                        write_vals.update({split_key: m2m_list})
            if write_vals:
                model_obj.write(
                    cr, uid, context.get('active_ids'), write_vals, context)
result = super(MassEditingWizard, self).create(cr, uid, {}, context)
return result
def action_apply(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
tuxlifan/moneyguru | qt/controller/budget/table.py | 2 | 1168 | # Created By: Virgil Dupras
# Created On: 2009-11-21
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtCore import Qt
from qtlib.column import Column
from ..table import Table, AMOUNT_PAINTER
class BudgetTable(Table):
COLUMNS = [
Column('start_date', 90),
Column('stop_date', 90),
Column('repeat_type', 80),
Column('interval', 50),
Column('account', 144),
Column('target', 144),
Column('amount', 100, alignment=Qt.AlignRight, painter=AMOUNT_PAINTER, resizeToFit=True),
]
def __init__(self, model, view):
Table.__init__(self, model, view)
self.view.sortByColumn(0, Qt.AscendingOrder) # sorted by start_date by default
self.view.deletePressed.connect(self.model.delete)
self.view.doubleClicked.connect(self.model.edit)
# we have to prevent Return from initiating editing.
self.view.editSelected = lambda: None
| gpl-3.0 |
takluyver/git-cola | cola/guicmds.py | 1 | 10597 | from __future__ import division, absolute_import, unicode_literals
import os
import re
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtCore import SIGNAL
from cola import cmds
from cola import core
from cola import difftool
from cola import gitcmds
from cola import qtutils
from cola import utils
from cola.git import git
from cola.i18n import N_
from cola.interaction import Interaction
from cola.models import main
from cola.widgets import completion
from cola.widgets.browse import BrowseDialog
from cola.widgets.selectcommits import select_commits
from cola.compat import ustr
def delete_branch():
"""Launch the 'Delete Branch' dialog."""
branch = choose_branch(N_('Delete Branch'), N_('Delete'))
if not branch:
return
cmds.do(cmds.DeleteBranch, branch)
def delete_remote_branch():
"""Launch the 'Delete Remote Branch' dialog."""
branch = choose_remote_branch(N_('Delete Remote Branch'), N_('Delete'))
if not branch:
return
rgx = re.compile(r'^(?P<remote>[^/]+)/(?P<branch>.+)$')
match = rgx.match(branch)
if match:
remote = match.group('remote')
branch = match.group('branch')
cmds.do(cmds.DeleteRemoteBranch, remote, branch)
def browse_current():
"""Launch the 'Browse Current Branch' dialog."""
branch = gitcmds.current_branch()
BrowseDialog.browse(branch)
def browse_other():
"""Prompt for a branch and inspect content at that point in time."""
# Prompt for a branch to browse
branch = choose_ref(N_('Browse Commits...'), N_('Browse'))
if not branch:
return
BrowseDialog.browse(branch)
def checkout_branch():
"""Launch the 'Checkout Branch' dialog."""
branch = choose_branch(N_('Checkout Branch'), N_('Checkout'))
if not branch:
return
cmds.do(cmds.CheckoutBranch, branch)
def cherry_pick():
"""Launch the 'Cherry-Pick' dialog."""
revs, summaries = gitcmds.log_helper(all=True)
commits = select_commits(N_('Cherry-Pick Commit'),
revs, summaries, multiselect=False)
if not commits:
return
cmds.do(cmds.CherryPick, commits)
def new_repo():
"""Prompt for a new directory and create a new Git repository
:returns str: repository path or None if no repository was created.
"""
dlg = QtGui.QFileDialog()
dlg.setFileMode(QtGui.QFileDialog.Directory)
dlg.setOption(QtGui.QFileDialog.ShowDirsOnly)
dlg.show()
dlg.raise_()
if dlg.exec_() != QtGui.QFileDialog.Accepted:
return None
paths = dlg.selectedFiles()
if not paths:
return None
path = ustr(paths[0])
if not path:
return None
# Avoid needlessly calling `git init`.
if git.is_git_dir(path):
# We could prompt here and confirm that they really didn't
# mean to open an existing repository, but I think
# treating it like an "Open" is a sensible DWIM answer.
return path
status, out, err = core.run_command(['git', 'init', path])
if status == 0:
return path
else:
title = N_('Error Creating Repository')
msg = (N_('"%(command)s" returned exit status %(status)d') %
dict(command='git init %s' % path, status=status))
details = N_('Output:\n%s') % out
if err:
details += '\n\n'
details += N_('Errors: %s') % err
qtutils.critical(title, msg, details)
return None
def open_new_repo():
dirname = new_repo()
if not dirname:
return
cmds.do(cmds.OpenRepo, dirname)
def prompt_for_clone():
"""
Present a GUI for cloning a repository.
Returns the target directory and URL
"""
url, ok = qtutils.prompt(N_('Path or URL to clone (Env. $VARS okay)'))
url = utils.expandpath(url)
if not ok or not url:
return None
try:
# Pick a suitable basename by parsing the URL
newurl = url.replace('\\', '/').rstrip('/')
default = newurl.rsplit('/', 1)[-1]
if default == '.git':
# The end of the URL is /.git, so assume it's a file path
default = os.path.basename(os.path.dirname(newurl))
if default.endswith('.git'):
# The URL points to a bare repo
default = default[:-4]
if url == '.':
# The URL is the current repo
default = os.path.basename(core.getcwd())
            if not default:
                raise ValueError('could not derive a directory name from the URL')
except:
Interaction.information(
N_('Error Cloning'),
N_('Could not parse Git URL: "%s"') % url)
Interaction.log(N_('Could not parse Git URL: "%s"') % url)
return None
# Prompt the user for a directory to use as the parent directory
msg = N_('Select a parent directory for the new clone')
dirname = qtutils.opendir_dialog(msg, main.model().getcwd())
if not dirname:
return None
count = 1
destdir = os.path.join(dirname, default)
olddestdir = destdir
if core.exists(destdir):
# An existing path can be specified
msg = (N_('"%s" already exists, cola will create a new directory') %
destdir)
Interaction.information(N_('Directory Exists'), msg)
# Make sure the new destdir doesn't exist
while core.exists(destdir):
destdir = olddestdir + str(count)
count += 1
return url, destdir
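# Illustrative sketch only: how the basename fallback above resolves a few
# representative inputs (example values, not taken from this module):
#
#     'git://host/repo.git'     -> default = 'repo'
#     'https://host/repo/.git'  -> default = 'repo'  (file-path style URL)
#     '.'                       -> default = basename of the current directory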
def export_patches():
"""Run 'git format-patch' on a list of commits."""
revs, summaries = gitcmds.log_helper()
to_export = select_commits(N_('Export Patches'), revs, summaries)
if not to_export:
return
cmds.do(cmds.FormatPatch, reversed(to_export), reversed(revs))
def diff_expression():
"""Diff using an arbitrary expression."""
tracked = gitcmds.tracked_branch()
current = gitcmds.current_branch()
if tracked and current:
ref = tracked + '..' + current
else:
ref = 'origin/master..'
difftool.diff_expression(qtutils.active_window(), ref)
def open_repo():
dirname = qtutils.opendir_dialog(N_('Open Git Repository...'),
main.model().getcwd())
if not dirname:
return
cmds.do(cmds.OpenRepo, dirname)
def open_repo_in_new_window():
"""Spawn a new cola session."""
dirname = qtutils.opendir_dialog(N_('Open Git Repository...'),
main.model().getcwd())
if not dirname:
return
cmds.do(cmds.OpenNewRepo, dirname)
def load_commitmsg():
"""Load a commit message from a file."""
filename = qtutils.open_file(N_('Load Commit Message'),
directory=main.model().getcwd())
if filename:
cmds.do(cmds.LoadCommitMessageFromFile, filename)
def choose_from_dialog(get, title, button_text, default):
parent = qtutils.active_window()
return get(title, button_text, parent, default=default)
def choose_ref(title, button_text, default=None):
return choose_from_dialog(completion.GitRefDialog.get,
title, button_text, default)
def choose_branch(title, button_text, default=None):
return choose_from_dialog(completion.GitBranchDialog.get,
title, button_text, default)
def choose_remote_branch(title, button_text, default=None):
return choose_from_dialog(completion.GitRemoteBranchDialog.get,
title, button_text, default)
def review_branch():
"""Diff against an arbitrary revision, branch, tag, etc."""
branch = choose_ref(N_('Select Branch to Review'), N_('Review'))
if not branch:
return
merge_base = gitcmds.merge_base_parent(branch)
difftool.diff_commits(qtutils.active_window(), merge_base, branch)
class TaskRunner(QtCore.QObject):
"""Runs QRunnable instances and transfers control when they finish"""
def __init__(self, parent):
QtCore.QObject.__init__(self, parent)
self.tasks = []
self.task_details = {}
self.connect(self, Task.FINISHED, self.finish)
def start(self, task, progress=None, finish=None):
"""Start the task and register a callback"""
if progress is not None:
progress.show()
# prevents garbage collection bugs in certain PyQt4 versions
self.tasks.append(task)
task_id = id(task)
self.task_details[task_id] = (progress, finish)
QtCore.QThreadPool.globalInstance().start(task)
def finish(self, task, *args, **kwargs):
task_id = id(task)
try:
self.tasks.remove(task)
except:
pass
try:
progress, finish = self.task_details[task_id]
del self.task_details[task_id]
except KeyError:
finish = progress = None
if progress is not None:
progress.hide()
if finish is not None:
finish(task, *args, **kwargs)
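# Illustrative sketch only: a typical TaskRunner round-trip; the parent
# widget, progress dialog and callback names are placeholders.
#
#     runner = TaskRunner(parent_widget)
#     task = CloneTask(runner, url, destdir, spawn=False)
#     runner.start(task, progress=progress_dialog, finish=on_finished)
#     # task.run() executes on the global thread pool; its finish() emits
#     # Task.FINISHED, which TaskRunner routes back to on_finished(task)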
class Task(QtCore.QRunnable):
"""Base class for concrete tasks"""
FINISHED = SIGNAL('finished')
def __init__(self, sender):
QtCore.QRunnable.__init__(self)
self.sender = sender
def finish(self, *args, **kwargs):
self.sender.emit(self.FINISHED, self, *args, **kwargs)
class CloneTask(Task):
"""Clones a Git repository"""
def __init__(self, sender, url, destdir, spawn):
Task.__init__(self, sender)
self.url = url
self.destdir = destdir
self.spawn = spawn
self.cmd = None
def run(self):
"""Runs the model action and captures the result"""
self.cmd = cmds.do(cmds.Clone, self.url, self.destdir,
spawn=self.spawn)
self.finish()
def clone_repo(task_runner, progress, finish, spawn):
"""Clone a repostiory asynchronously with progress animation"""
result = prompt_for_clone()
if result is None:
return
# Use a thread to update in the background
url, destdir = result
progress.set_details(N_('Clone Repository'),
N_('Cloning repository at %s') % url)
task = CloneTask(task_runner, url, destdir, spawn)
task_runner.start(task,
finish=finish,
progress=progress)
def report_clone_repo_errors(task):
"""Report errors from the clone task if they exist"""
if task.cmd is None or task.cmd.ok:
return
Interaction.critical(task.cmd.error_message,
message=task.cmd.error_message,
details=task.cmd.error_details)
| gpl-2.0 |
Chibin/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/package/metadata_track/test_mdt.py | 9 | 2365 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from mpp.lib.PSQL import PSQL
from mpp.lib.GPFDIST import GPFDISTError
from tinctest.lib import local_path,run_shell_command
from mpp.models import SQLTestCase
from mpp.gpdb.tests.package.metadata_track import MDT
from mpp.lib.gppkg.gppkg import Gppkg
mdt = MDT()
class MDTSQLTestCase(SQLTestCase):
"""
@optimizer_mode off
@tags gppkg
"""
sql_dir = 'sql/'
ans_dir = 'expected'
out_dir = 'output/'
@classmethod
def setUpClass(cls):
"""
        Check whether the plperl package is installed; if not, install it.
"""
super(MDTSQLTestCase, cls).setUpClass()
mdt.pre_process_sql()
mdt.pre_process_ans()
mdt.setup_gpfdist()
cmd = 'gpssh --version'
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command(cmd, 'check product version', res)
gppkg = Gppkg()
product_version = res['stdout']
gppkg.gppkg_install(product_version, 'plperl')
setup_user = 'create role mdt_user superuser login;'
setup_db = 'create database mdt_db;'
setup_sql = local_path('sql/setup/setup.sql')
setup_output = local_path('output/setup/setup.out')
PSQL.run_sql_command(sql_cmd=setup_user, dbname=os.environ.get('PGDATABASE'))
PSQL.run_sql_command(sql_cmd=setup_db, dbname=os.environ.get('PGDATABASE'), username='mdt_user')
PSQL.run_sql_file(sql_file = setup_sql, out_file=setup_output, dbname='mdt_db', username='mdt_user')
@classmethod
def tearDownClass(cls):
try:
mdt.cleanup_gpfdist()
except GPFDISTError:
tinctest.logger.error("Unable to clenup gpfdist process")
| apache-2.0 |
russel1237/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
lah7/razer-drivers | pylib/openrazer/client/macro.py | 2 | 6267 | import json as _json
import dbus as _dbus
import openrazer_daemon.misc.macro as _daemon_macro
from openrazer_daemon import keyboard
class RazerMacro(object):
def __init__(self, serial: str, devname: str, daemon_dbus=None, capabilities=None):
if daemon_dbus is None:
session_bus = _dbus.SessionBus()
daemon_dbus = session_bus.get_object("org.razer", "/org/razer/device/{0}".format(serial))
if capabilities is None:
self._capabilities = {}
else:
self._capabilities = capabilities
self._macro_dbus = _dbus.Interface(daemon_dbus, "razer.device.macro")
self._macro_enabled = False
self.name = devname
def get_macros(self) -> dict:
json_payload = self._macro_dbus.getMacros()
macro_structure = _json.loads(json_payload)
macro_key_mapping = {}
for bind_key, macro_list in macro_structure.items():
macro_objects = []
for macro_dict in macro_list:
macro_obj = _daemon_macro.macro_dict_to_obj(macro_dict)
macro_objects.append(macro_obj)
macro_key_mapping[bind_key] = macro_objects
return macro_key_mapping
def add_macro(self, bind_key: str, macro_object_sequence: list):
"""
Add macro to specified bind key
:param bind_key: Bind Key (has to be in openrazer.keyboard.KEY_MAPPING)
:type bind_key: str
:param macro_object_sequence: List of macro objects
:type macro_object_sequence: list or tuple or __daemon_macro.MacroObject
"""
if isinstance(macro_object_sequence, _daemon_macro.MacroObject):
macro_object_sequence = [macro_object_sequence]
if not isinstance(macro_object_sequence, (tuple, list)):
raise ValueError("macro_object_sequence is not iterable")
macro_list = []
for macro_obj in macro_object_sequence:
if not isinstance(macro_obj, _daemon_macro.MacroObject):
raise ValueError("{0} is not a macro object".format(str(macro_obj)))
macro_list.append(macro_obj.to_dict())
json_payload = _json.dumps(macro_list)
self._macro_dbus.addMacro(bind_key, json_payload)
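    # Illustrative sketch only: binding a two-keypress macro to a bind key.
    # 'M1' and the key names are example values and must exist in the
    # device's key mapping.
    #
    #     seq = (macro.create_keypress_macro_item('A') +
    #            macro.create_keypress_macro_item('B'))
    #     macro.add_macro('M1', seq)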
def del_macro(self, bind_key: str):
key_map = keyboard.KEY_MAPPING
map_str = "keyboard.KEY_MAPPING"
if self.name in ["Razer Orbweaver", "Razer Orbweaver Chroma"]:
key_map = keyboard.ORBWEAVER_KEY_MAPPING
map_str = "keyboard.ORBWEAVER_KEY_MAPPING"
elif self.name in ["Razer Tartarus", "Razer Tartarus Chroma", "Razer Nostromo"]:
key_map = keyboard.TARTARUS_KEY_MAPPING
map_str = "keyboard.TARTARUS_KEY_MAPPING"
elif self.name in ["Razer Naga Hex V2", "Razer Naga Chroma"]:
key_map = keyboard.NAGA_HEX_V2_KEY_MAPPING
map_str = "keyboard.NAGA_HEX_V2_KEY_MAPPING"
if bind_key not in key_map:
raise ValueError("Key {0} is not in openrazer.{1}".format(bind_key, map_str))
else:
self._macro_dbus.deleteMacro(bind_key)
@property
def mode_modifier(self):
if 'macro_mode_modifier' in self._capabilities:
return self._macro_dbus.getModeModifier()
return False
@mode_modifier.setter
def mode_modifier(self, value):
if 'macro_mode_modifier' in self._capabilities and isinstance(value, bool):
            # the getter was being called here by mistake; the daemon's
            # corresponding setter method is assumed to be setModeModifier
            self._macro_dbus.setModeModifier(value)
@staticmethod
def create_url_macro_item(url: str) -> _daemon_macro.MacroURL:
"""
Create a macro object that opens a URL in a browser
:param url: URL
:type url: str
:return: Macro object
:rtype: _daemon_macro.MacroURL
"""
return _daemon_macro.MacroURL(url)
@staticmethod
def create_script_macro_item(script_path: str, script_args: str = None) -> _daemon_macro.MacroScript:
"""
Create a macro object that runs a script
The arguments to the script should be a string containing all the arguments, if any values contain spaces they should be quoted accordingly
:param script_path: Script filepath, includes script name
:type script_path: str
:param script_args: Script arguments
:type script_args: str or None
:return: Macro object
:rtype: _daemon_macro.MacroScript
"""
return _daemon_macro.MacroScript(script_path, script_args)
@staticmethod
def create_keypress_up_macro_item(key_name: str, pre_pause: int = 0) -> _daemon_macro.MacroKey:
"""
Create a macro action that consists of a key release event
:param key_name: Key Name, compatible with XTE
:type key_name: str
:param pre_pause: Optional delay before key is actioned (if turned on in daemon)
:type pre_pause: int
:return: Macro Key
:rtype: _daemon_macro.MacroKey
"""
return _daemon_macro.MacroKey(key_name, pre_pause, 'UP')
@staticmethod
def create_keypress_down_macro_item(key_name: str, pre_pause: int = 0) -> _daemon_macro.MacroKey:
"""
Create a macro action that consists of a key press event
:param key_name: Key Name, compatible with XTE
:type key_name: str
:param pre_pause: Optional delay before key is actioned (if turned on in daemon)
:type pre_pause: int
:return: Macro Key
:rtype: _daemon_macro.MacroKey
"""
return _daemon_macro.MacroKey(key_name, pre_pause, 'DOWN')
@classmethod
def create_keypress_macro_item(cls, key_name: str, pre_pause: int = 0) -> list:
"""
Create a macro action that consists of a key press and release event
The pre_pause delay will be applied to both key events
:param key_name: Key Name, compatible with XTE
:type key_name: str
:param pre_pause: Optional delay before key is actioned (if turned on in daemon)
:type pre_pause: int
:return: Macro Key
:rtype: list of _daemon_macro.MacroKey
"""
return [cls.create_keypress_down_macro_item(key_name, pre_pause), cls.create_keypress_up_macro_item(key_name, pre_pause)]
| gpl-2.0 |
mariopro/youtube-dl | youtube_dl/extractor/yinyuetai.py | 132 | 1907 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class YinYueTaiIE(InfoExtractor):
IE_NAME = 'yinyuetai:video'
IE_DESC = '音悦Tai'
_VALID_URL = r'https?://v\.yinyuetai\.com/video(?:/h5)?/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://v.yinyuetai.com/video/2322376',
'md5': '6e3abe28d38e3a54b591f9f040595ce0',
'info_dict': {
'id': '2322376',
'ext': 'mp4',
'title': '少女时代_PARTY_Music Video Teaser',
'creator': '少女时代',
'duration': 25,
'thumbnail': 're:^https?://.*\.jpg$',
},
}, {
'url': 'http://v.yinyuetai.com/video/h5/2322376',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'http://ext.yinyuetai.com/main/get-h-mv-info?json=true&videoId=%s' % video_id, video_id,
'Downloading mv info')['videoInfo']['coreVideoInfo']
if info['error']:
raise ExtractorError(info['errorMsg'], expected=True)
formats = [{
'url': format_info['videoUrl'],
'format_id': format_info['qualityLevel'],
'format': format_info.get('qualityLevelName'),
'filesize': format_info.get('fileSize'),
            # though URLs end with .flv, the downloaded files are in fact mp4
'ext': 'mp4',
'tbr': format_info.get('bitrate'),
} for format_info in info['videoUrlModels']]
self._sort_formats(formats)
return {
'id': video_id,
'title': info['videoName'],
'thumbnail': info.get('bigHeadImage'),
'creator': info.get('artistNames'),
'duration': info.get('duration'),
'formats': formats,
}
| unlicense |
vanwinkeljan/rpi_vmcsx | middleware/khronos/tools/reorg/khrn_reorg.py | 2 | 7397 | import os
import re
import stat
import subprocess
mapping = {}
re_line = re.compile('(.+) (.+)')
def fix_includes(outputpath, inputpath, srcpath):
input = open(inputpath, 'r')
lines = input.readlines()
input.close()
output = reallyopen(outputpath, 'w')
filename = os.path.basename(mapping[srcpath])
filedir = os.path.dirname(srcpath)+'/'
re_include = re.compile('^( *[#.])include "(.*)"(.*)\n$')
re_ifndef = re.compile('^#ifndef ([^_]\w+_H)\n$')
re_define = re.compile('^#define ([^_]\w+_H)\n$')
last_ifndef = None
for line in lines:
match = re_include.match(line)
if match:
indent = match.group(1)
path = match.group(2)
comment = match.group(3)
oldpath = path
if os.path.normpath(filedir + path).replace('\\','/') in mapping:
# Turn relative path into absolute.
path = filedir + path
path = os.path.normpath(path)
npath = path
path = path.replace('\\','/')
rpath = path
if path in mapping:
path = mapping[path]
if path not in ['rpc_platform.h','khronos_platform_types.h','khrn_platform_interlock.h','vg_platform_config.h','vg_platform_scissor.h','vg_platform_path.h','vg_platform_ramp.h'] and ('/' not in path or path.find('..') != -1):
print "NO " + os.path.normpath(filedir + path).replace('\\','/')
print "["+filedir+"]"+oldpath + " -> " + npath + " -> " + rpath + " -> " + path
assert(0)
line = indent + 'include "' + path + '"' + comment + '\n'
if last_ifndef != None:
match = re_define.match(line)
if match:
token = filename.upper().replace('.','_')
line = "#ifndef "+token+"\n#define "+token+"\n"
else:
line = "#ifndef "+last_ifndef+"\n"+line
last_ifndef = None
match = re_ifndef.match(line)
if match:
last_ifndef = match.group(1)
line = ''
output.write(line)
assert(last_ifndef == None)
def read_mapping(file):
result = {}
for line in file:
match = re_line.match(line)
assert(match)
name0 = match.group(1)
name1 = match.group(2)
if name0 in result or name1 in result.values():
print name0
assert(0)
result[name0] = name1
return result
def p4_move():
subprocess.call(['p4','edit','middleware/khronos/...'])
for src in mapping:
dst = mapping[src]
subprocess.call(['p4','move',src,dst])
def new_file_change_include():
for src in mapping:
dst = mapping[src]
print dst
assert(dst != src)
fix_includes(dst, src, src)
def in_place_change_include():
for src in mapping:
dst = mapping[src]
print dst
fix_includes(dst, dst, src)
def reallyopen(path, mode):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
return open(path, mode)
def makefile(sfile,dfile,cfile):
sdirs = []
snames = []
cdirs = []
cnames = []
for src in mapping:
dst = mapping[src]
(blah, ext) = os.path.splitext(dst)
if ((dst.find('2707')==-1 and dst.find('_cr')==-1 and dst.find('linux')==-1 and dst.find('win32')==-1 and dst.find('vcos')==-1 and dst.find('selfhosted')==-1 and dst.find('direct/')==-1 and dst.find('wf')==-1) and dst.find('dispatch')==-1 and dst.find('khrn_misc')==-1 and dst.find('egl_brcm_global_image_id')==-1 and dst.find('khrn_int_generic_map.c')==-1 and dst.find('khrn_visual_stats')==-1 or dst in ['middleware/khronos/glxx/glxx_server_cr.c','middleware/khronos/glsl/2707b/glsl_fpu3.c']) and ext not in ['.h','.inc','.qinc','.qasm','.l','.y','.mk','.bat','.arm']:
dir = os.path.dirname(dst) + '/'
name = os.path.basename(dst)
assert(name not in snames)
assert(name not in cnames)
if dir.startswith('middleware/khronos/'):
dir = dir[19:]
if dir not in sdirs:
sdirs.append(dir)
snames.append(name)
elif dir.startswith('interface/khronos/'):
dir = dir[18:]
if dir not in cdirs:
cdirs.append(dir)
cnames.append(name)
else:
print dir
assert(0)
sdirs.sort()
snames.sort()
cdirs.sort()
cnames.sort()
sfile.write('LIB_NAME := khronos_main\nLIB_SRC :=\nLIB_VPATH :=\n')
sfile.write('LIB_CFLAGS := $(WARNINGS_ARE_ERRORS)\nLIB_AFLAGS := $(ASM_WARNINGS_ARE_ERRORS)\nDEFINES_GLOBAL += EGL_SERVER_SMALLINT\n')
sfile.write('LIB_VPATH := ')
for dir in sdirs:
if dir != '':
sfile.write(dir[:-1] + ' ')
sfile.write('\nLIB_SRC := ')
for name in snames:
sfile.write(name + ' ')
sfile.write('\nLIB_IPATH := \n')
dfile.write('LIB_NAME := khronos_direct\nLIB_SRC :=\nLIB_VPATH :=\n')
dfile.write('LIB_CFLAGS := $(WARNINGS_ARE_ERRORS)\nLIB_AFLAGS := $(ASM_WARNINGS_ARE_ERRORS)\nDEFINES_GLOBAL += EGL_SERVER_SMALLINT\nLIB_DEFINES := RPC_DIRECT\n')
dfile.write('LIB_LIBS := middleware/khronos/khronos_main\n')
dfile.write('LIB_VPATH := ../../interface/khronos/platform/direct common ')
for dir in cdirs:
dfile.write(('../../interface/khronos/' + dir)[:-1] + ' ')
dfile.write('\nLIB_SRC := khrn_client_direct.c khrn_misc.c ')
for name in cnames:
dfile.write(name + ' ')
dfile.write('\nLIB_IPATH := \n')
cfile.write('LIB_NAME := khronos_client\nLIB_SRC :=\nLIB_VPATH :=\n')
cfile.write('LIB_CFLAGS := $(WARNINGS_ARE_ERRORS)\nLIB_AFLAGS := $(ASM_WARNINGS_ARE_ERRORS)\n')
cfile.write('LIB_VPATH := ../../../../interface/khronos/platform/selfhosted ../../../../interface/khronos/platform/vcos ../../interface/khronos/common ../../interface/khronos/egl ../../interface/khronos/ext ../../interface/khronos/glxx ../../interface/khronos/vg ')
for dir in cdirs:
cfile.write(('../../../../interface/khronos/' + dir)[:-1] + ' ')
cfile.write('\nLIB_SRC := khrn_client_selfhosted.c khrn_client_rpc_selfhosted.c khrn_client_vcos.c ')
for name in cnames:
cfile.write(name + ' ')
cfile.write('\nLIB_IPATH := \n')
def full_listing(path):
result = []
for filename in os.listdir(path):
fullname = path + filename
mode = os.stat(fullname)[stat.ST_MODE]
if (stat.S_ISDIR(mode)):
full_listing(fullname + '/')
else:
result.append(path + filename)
result.sort()
return result
def show_missing_files():
files = full_listing('middleware/khronos/')
for f in files:
if f not in mapping:
print "MISSING " + f
def branch_spec(srcpath, dstpath, m2):
for src in mapping:
dst = dstpath + mapping[src]
if m2 and 'xxx/'+src in m2:
src = m2['xxx/'+src]
else:
src = srcpath + src
print src + ' ' + dst
os.chdir('../../../../')
mapping = read_mapping(open('middleware/khronos/tools/reorg/mapping.txt','r'))
#new_file_change_include()
#show_missing_files()
#makefile(open('middleware/khronos/khronos_main.mk','w'),open('middleware/khronos/khronos_direct.mk','w'),open('middleware/khronos/client/vcfw/khronos_client.mk','w'))
#p4_move()
#in_place_change_include()
hauxwell_mapping = read_mapping(open('middleware/khronos/tools/reorg/hauxwell_mapping.txt','r'))
branch_spec('//software/projects/hauxwell/vc4_to_ansi/','//software/vc4/DEV/', hauxwell_mapping)
| apache-2.0 |
NorthernStars/python-mrlib | python-mrLib/mrLib/networking/data/networkhandshake.py | 1 | 13795 | # ./networkhandshake.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:e92452c8d3e28a9e27abfc9994d2007779e7f4c9
# Generated 2013-11-11 12:06:59.516575 by PyXB version 1.2.3
# Namespace AbsentNamespace0
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:629e6ecc-4ac1-11e3-8d9e-0016e6870683')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.3'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.CreateAbsentNamespace()
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, unicode):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
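# Illustrative sketch only: round-tripping a handshake element through the
# binding above (the XML literal is a placeholder shaped by the schema):
#
#     doc = CreateFromDocument(
#         '<connectionrequest><clientname>bot</clientname></connectionrequest>')
#     doc.clientname   # -> u'bot'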
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Complex type connectionAcknowlege with content type ELEMENT_ONLY
class connectionAcknowlege (pyxb.binding.basis.complexTypeDefinition):
"""Complex type connectionAcknowlege with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'connectionAcknowlege')
_XSDLocation = pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 10, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element servername uses Python identifier servername
__servername = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'servername'), 'servername', '__AbsentNamespace0_connectionAcknowlege_servername', False, pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 12, 6), )
servername = property(__servername.value, __servername.set, None, None)
# Element clientname uses Python identifier clientname
__clientname = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'clientname'), 'clientname', '__AbsentNamespace0_connectionAcknowlege_clientname', False, pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 13, 6), )
clientname = property(__clientname.value, __clientname.set, None, None)
# Element connectionallowed uses Python identifier connectionallowed
__connectionallowed = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'connectionallowed'), 'connectionallowed', '__AbsentNamespace0_connectionAcknowlege_connectionallowed', False, pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 14, 6), )
connectionallowed = property(__connectionallowed.value, __connectionallowed.set, None, None)
_ElementMap.update({
__servername.name() : __servername,
__clientname.name() : __clientname,
__connectionallowed.name() : __connectionallowed
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'connectionAcknowlege', connectionAcknowlege)
# Complex type connectionRequest with content type ELEMENT_ONLY
class connectionRequest (pyxb.binding.basis.complexTypeDefinition):
"""Complex type connectionRequest with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'connectionRequest')
_XSDLocation = pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 18, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element clientname uses Python identifier clientname
__clientname = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'clientname'), 'clientname', '__AbsentNamespace0_connectionRequest_clientname', False, pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 20, 6), )
clientname = property(__clientname.value, __clientname.set, None, None)
_ElementMap.update({
__clientname.name() : __clientname
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'connectionRequest', connectionRequest)
# Complex type connectionEstablished with content type EMPTY
class connectionEstablished (pyxb.binding.basis.complexTypeDefinition):
"""Complex type connectionEstablished with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'connectionEstablished')
_XSDLocation = pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 24, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'connectionEstablished', connectionEstablished)
connectionacknowlege = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'connectionacknowlege'), connectionAcknowlege, location=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 4, 2))
Namespace.addCategoryObject('elementBinding', connectionacknowlege.name().localName(), connectionacknowlege)
connectionestablished = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'connectionestablished'), connectionEstablished, location=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 6, 2))
Namespace.addCategoryObject('elementBinding', connectionestablished.name().localName(), connectionestablished)
connectionrequest = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'connectionrequest'), connectionRequest, location=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 8, 2))
Namespace.addCategoryObject('elementBinding', connectionrequest.name().localName(), connectionrequest)
connectionAcknowlege._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'servername'), pyxb.binding.datatypes.string, scope=connectionAcknowlege, location=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 12, 6)))
connectionAcknowlege._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'clientname'), pyxb.binding.datatypes.string, scope=connectionAcknowlege, location=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 13, 6)))
connectionAcknowlege._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'connectionallowed'), pyxb.binding.datatypes.boolean, scope=connectionAcknowlege, location=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 14, 6)))
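# Annotation (added for clarity): the helper below builds the generated
# FAC automaton for connectionAcknowlege. It encodes the content model
# servername? clientname? connectionallowed: the counters cc_0/cc_1 carry
# the {0,1} occurrence bounds of the first two elements, and only the
# connectionallowed state carries a final update, so a document validates
# only once that element has been seen.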
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=1, metadata=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 12, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0L, max=1, metadata=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 13, 6))
counters.add(cc_1)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(connectionAcknowlege._UseForTag(pyxb.namespace.ExpandedName(None, u'servername')), pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 12, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = None
symbol = pyxb.binding.content.ElementUse(connectionAcknowlege._UseForTag(pyxb.namespace.ExpandedName(None, u'clientname')), pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 13, 6))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
symbol = pyxb.binding.content.ElementUse(connectionAcknowlege._UseForTag(pyxb.namespace.ExpandedName(None, u'connectionallowed')), pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 14, 6))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
st_2._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
connectionAcknowlege._Automaton = _BuildAutomaton()
connectionRequest._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'clientname'), pyxb.binding.datatypes.string, scope=connectionRequest, location=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 20, 6)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=1, metadata=pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 20, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(connectionRequest._UseForTag(pyxb.namespace.ExpandedName(None, u'clientname')), pyxb.utils.utility.Location('/home/northernstars/git/python-mrlib/python-mrLib/mrLib/networking/data/networkhandshakeschema.xsd', 20, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
connectionRequest._Automaton = _BuildAutomaton_()
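# Illustrative use of the bindings above (a minimal sketch, assuming this
# generated module is importable as `handshake` and that PyXB is installed;
# the lowercase names mirror the elementBinding registrations):
#
#   import handshake
#   req = handshake.connectionrequest()
#   req.clientname = u'client-01'
#   xml = req.toxml('utf-8')
#   parsed = handshake.CreateFromDocument(xml)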
| apache-2.0 |
meredith-digops/ansible | lib/ansible/modules/files/unarchive.py | 21 | 34901 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2016, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: unarchive
version_added: 1.4
short_description: Unpacks an archive after (optionally) copying it from the local machine.
extends_documentation_fragment: [files, decrypt]
description:
- The C(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking.
Set remote_src=yes to unpack an archive which already exists on the target.
options:
src:
description:
- If remote_src=no (default), local path to archive file to copy to the target server; can be absolute or relative. If remote_src=yes, path on the
target server to existing archive file to unpack.
- If remote_src=yes and src contains ://, the remote machine will download the file from the url first. (version_added 2.0). This is only for
simple cases, for full download support look at the M(get_url) module.
required: true
default: null
dest:
description:
- Remote absolute path where the archive should be unpacked
required: true
default: null
copy:
description:
- "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine."
- "This option has been deprecated in favor of C(remote_src)"
- "This option is mutually exclusive with C(remote_src)."
required: false
choices: [ "yes", "no" ]
default: "yes"
creates:
description:
- A filename; when it already exists, this step will B(not) be run.
required: no
default: null
version_added: "1.6"
list_files:
description:
- If set to True, return the list of files that are contained in the archive.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "2.0"
exclude:
description:
- List the directory and file entries that you would like to exclude from the unarchive action.
required: false
default: []
version_added: "2.1"
keep_newer:
description:
- Do not replace existing files that are newer than files from the archive.
required: false
default: no
version_added: "2.1"
extra_opts:
description:
- Specify additional options by passing in an array.
default:
required: false
version_added: "2.1"
remote_src:
description:
- "Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller."
- "This option is mutually exclusive with C(copy)."
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.2"
validate_certs:
description:
- This only applies if using a https url as the source of the file.
- This should only be set to C(no) on personally controlled sites using self-signed certificates.
- Prior to 2.2 the code worked as if this was set to C(yes).
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "2.2"
author: "Dag Wieers (@dagwieers)"
todo:
- re-implement tar support using native tarfile module
- re-implement zip support using native zipfile module
notes:
- requires C(gtar)/C(unzip) command on target host
- can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar)
- uses gtar's C(--diff) argument to calculate if changed or not. If this argument is not
supported, it will always unpack the archive
- existing files/directories in the destination which are not in the archive
are not touched. This is the same behavior as a normal archive extraction
- existing files/directories in the destination which are not in the archive
are ignored for purposes of deciding if the archive should be unpacked or not
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- unarchive:
src: foo.tgz
dest: /var/lib/foo
# Unarchive a file that is already on the remote machine
- unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
remote_src: True
# Unarchive a file that needs to be downloaded (added in 2.0)
- unarchive:
src: https://example.com/example.zip
dest: /usr/local/bin
remote_src: True
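# Unarchive while excluding some paths and skipping when a marker file exists
# (illustrative sketch only; these paths and option values are hypothetical)
- unarchive:
src: /tmp/foo.tar.gz
dest: /var/lib/foo
remote_src: True
creates: /var/lib/foo/foo.conf
exclude:
- .git
extra_opts:
- --strip-components=1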
'''
import re
import os
import stat
import pwd
import grp
import datetime
import time
import binascii
import codecs
from zipfile import ZipFile, BadZipfile
from ansible.module_utils._text import to_bytes, to_text
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
# String from tar that shows the tar contents are different from the
# filesystem
OWNER_DIFF_RE = re.compile(r': Uid differs$')
GROUP_DIFF_RE = re.compile(r': Gid differs$')
MODE_DIFF_RE = re.compile(r': Mode differs$')
MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
#NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
# When downloading an archive, how much of the archive to download before
# saving to a tempfile (64k)
BUFSIZE = 65536
def crc32(path):
''' Return a CRC32 checksum of a file '''
with open(path, 'rb') as f:
return binascii.crc32(f.read()) & 0xffffffff
def shell_escape(string):
''' Quote meta-characters in the args for the unix shell '''
return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string)
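# Example: shell_escape('foo bar&baz') returns 'foo\ bar\&baz'; every
# character outside [A-Za-z0-9_] gets a backslash prefix.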
class UnarchiveError(Exception):
pass
# class to handle .zip files
class ZipArchive(object):
def __init__(self, src, dest, file_args, module):
self.src = src
self.dest = dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
self.excludes = module.params['exclude']
self.includes = []
self.cmd_path = self.module.get_bin_path('unzip')
self._files_in_archive = []
self._infodict = dict()
def _permstr_to_octal(self, modestr, umask):
''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
revstr = modestr[::-1]
mode = 0
for j in range(0, 3):
for i in range(0, 3):
if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:
mode += 2**(i+3*j)
# The unzip utility does not support setting the stST bits
# if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:
# mode += 2**(9+j)
return ( mode & ~umask )
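# Examples: _permstr_to_octal('rw-r--r--', 0) == 0o644 and
# _permstr_to_octal('rwxr-x---', 0) == 0o750; a non-zero umask clears the
# matching bits, e.g. _permstr_to_octal('rw-rw-rw-', 0o022) == 0o644.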
def _legacy_file_list(self, force_refresh=False):
unzip_bin = self.module.get_bin_path('unzip')
if not unzip_bin:
raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)
rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])
if rc:
raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
for line in out.splitlines()[3:-2]:
fields = line.split(None, 7)
self._files_in_archive.append(fields[7])
self._infodict[fields[7]] = int(fields[6])
def _crc32(self, path):
if self._infodict:
return self._infodict[path]
try:
archive = ZipFile(self.src)
except BadZipfile:
e = get_exception()
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list()
else:
raise
else:
try:
for item in archive.infolist():
self._infodict[item.filename] = int(item.CRC)
except:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
return self._infodict[path]
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
self._files_in_archive = []
try:
archive = ZipFile(self.src)
except BadZipfile:
e = get_exception()
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list(force_refresh)
else:
raise
else:
try:
for member in archive.namelist():
if member not in self.excludes:
self._files_in_archive.append(to_native(member))
except:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
archive.close()
return self._files_in_archive
def is_unarchived(self):
cmd = [ self.cmd_path, '-ZT', '-s', self.src ]
if self.excludes:
cmd.extend([ ' -x ', ] + self.excludes)
rc, out, err = self.module.run_command(cmd)
old_out = out
diff = ''
out = ''
if rc == 0:
unarchived = True
else:
unarchived = False
# Get some information related to user/group ownership
umask = os.umask(0)
os.umask(umask)
# Get current user and group information
groups = os.getgroups()
run_uid = os.getuid()
run_gid = os.getgid()
try:
run_owner = pwd.getpwuid(run_uid).pw_name
except:
run_owner = run_uid
try:
run_group = grp.getgrgid(run_gid).gr_name
except:
run_group = run_gid
# Get future user ownership
fut_owner = fut_uid = None
if self.file_args['owner']:
try:
tpw = pwd.getpwnam(self.file_args['owner'])
except:
try:
tpw = pwd.getpwuid(int(self.file_args['owner']))
except:
tpw = pwd.getpwuid(run_uid)
fut_owner = tpw.pw_name
fut_uid = tpw.pw_uid
else:
try:
fut_owner = run_owner
except:
pass
fut_uid = run_uid
# Get future group ownership
fut_group = fut_gid = None
if self.file_args['group']:
try:
tgr = grp.getgrnam(self.file_args['group'])
except:
try:
tgr = grp.getgrgid(int(self.file_args['group']))
except:
tgr = grp.getgrgid(run_gid)
fut_group = tgr.gr_name
fut_gid = tgr.gr_gid
else:
try:
fut_group = run_group
except:
pass
fut_gid = run_gid
for line in old_out.splitlines():
change = False
pcs = line.split(None, 7)
if len(pcs) != 8:
# Too few fields... probably a piece of the header or footer
continue
# Check first and seventh field in order to skip header/footer
if len(pcs[0]) != 7 and len(pcs[0]) != 10:
continue
if len(pcs[6]) != 15:
continue
# Possible entries:
# -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
# -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
# -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
# --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
continue
ztype = pcs[0][0]
permstr = pcs[0][1:]
version = pcs[1]
ostype = pcs[2]
size = int(pcs[3])
path = to_text(pcs[7], errors='surrogate_or_strict')
# Skip excluded files
if path in self.excludes:
out += 'Path %s is excluded on request\n' % path
continue
# Itemized change requires L for symlink
if path[-1] == '/':
if ztype != 'd':
err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
ftype = 'd'
elif ztype == 'l':
ftype = 'L'
elif ztype == '-':
ftype = 'f'
elif ztype == '?':
ftype = 'f'
# Some files may be storing FAT permissions, not Unix permissions
if len(permstr) == 6:
if path[-1] == '/':
permstr = 'rwxrwxrwx'
elif permstr == 'rwx---':
permstr = 'rwxrwxrwx'
else:
permstr = 'rw-rw-rw-'
# Test string conformity
if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
# DEBUG
# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
dest = os.path.join(self.dest, path)
try:
st = os.lstat(dest)
except:
change = True
self.includes.append(path)
err += 'Path %s is missing\n' % path
diff += '>%s++++++.?? %s\n' % (ftype, path)
continue
# Compare file types
if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
change = True
self.includes.append(path)
err += 'File %s already exists, but not as a directory\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'f' and not stat.S_ISREG(st.st_mode):
change = True
unarchived = False
self.includes.append(path)
err += 'Directory %s already exists, but not as a regular file\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
change = True
self.includes.append(path)
err += 'Directory %s already exists, but not as a symlink\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
itemized = list('.%s.......??' % ftype)
# Note: this timestamp calculation has a rounding error
# somewhere... unzip and this timestamp can be one second off
# When that happens, we report a change and re-unzip the file
dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
timestamp = time.mktime(dt_object.timetuple())
# Compare file timestamps
if stat.S_ISREG(st.st_mode):
if self.module.params['keep_newer']:
if timestamp > st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s is older, replacing file\n' % path
itemized[4] = 't'
elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
# Add to excluded files, ignore other changes
out += 'File %s is newer, excluding file\n' % path
self.excludes.append(path)
continue
else:
if timestamp != st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
itemized[4] = 't'
# Compare file sizes
if stat.S_ISREG(st.st_mode) and size != st.st_size:
change = True
err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
itemized[3] = 's'
# Compare file checksums
if stat.S_ISREG(st.st_mode):
crc = crc32(dest)
if crc != self._crc32(path):
change = True
err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
itemized[2] = 'c'
# Compare file permissions
# Do not handle permissions of symlinks
if ftype != 'L':
# Use the new mode provided with the action, if there is one
if self.file_args['mode']:
if isinstance(self.file_args['mode'], int):
mode = self.file_args['mode']
else:
try:
mode = int(self.file_args['mode'], 8)
except Exception:
e = get_exception()
self.module.fail_json(path=path, msg="mode %(mode)s must be in octal form" % self.file_args, details=str(e))
# Only special files require no umask-handling
elif ztype == '?':
mode = self._permstr_to_octal(permstr, 0)
else:
mode = self._permstr_to_octal(permstr, umask)
if mode != stat.S_IMODE(st.st_mode):
change = True
itemized[5] = 'p'
err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
# Compare file user ownership
owner = uid = None
try:
owner = pwd.getpwuid(st.st_uid).pw_name
except:
uid = st.st_uid
# If we are not root and requested owner is not our user, fail
if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
if owner and owner != fut_owner:
change = True
err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
itemized[6] = 'o'
elif uid and uid != fut_uid:
change = True
err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
itemized[6] = 'o'
# Compare file group ownership
group = gid = None
try:
group = grp.getgrgid(st.st_gid).gr_name
except:
gid = st.st_gid
if run_uid != 0 and fut_gid not in groups:
raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
if group and group != fut_group:
change = True
err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
itemized[6] = 'g'
elif gid and gid != fut_gid:
change = True
err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
itemized[6] = 'g'
# Register changed files and finalize diff output
if change:
if path not in self.includes:
self.includes.append(path)
diff += '%s %s\n' % (''.join(itemized), path)
if self.includes:
unarchived = False
# DEBUG
# out = old_out + out
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
def unarchive(self):
cmd = [ self.cmd_path, '-o' ]
if self.opts:
cmd.extend(self.opts)
cmd.append(self.src)
# NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
# if self.includes:
# NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
# cmd.extend(map(shell_escape, self.includes))
if self.excludes:
cmd.extend([ '-x' ] + self.excludes)
cmd.extend([ '-d', self.dest ])
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
return False, 'Command "unzip" not found.'
cmd = [ self.cmd_path, '-l', self.src ]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return True, None
return False, 'Command "%s" could not handle archive.' % self.cmd_path
# class to handle gzipped tar files
class TgzArchive(object):
def __init__(self, src, dest, file_args, module):
self.src = src
self.dest = dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
if self.module.check_mode:
self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]
# Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
self.cmd_path = self.module.get_bin_path('gtar', None)
if not self.cmd_path:
# Fallback to tar
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = '-z'
self._files_in_archive = []
if self.cmd_path:
self.tar_type = self._get_tar_type()
else:
self.tar_type = None
def _get_tar_type(self):
cmd = [self.cmd_path, '--version']
(rc, out, err) = self.module.run_command(cmd)
tar_type = None
if out.startswith('bsdtar'):
tar_type = 'bsd'
elif out.startswith('tar') and 'GNU' in out:
tar_type = 'gnu'
return tar_type
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
cmd = [ self.cmd_path, '--list', '-C', self.dest ]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend([ '--show-transformed-names' ] + self.opts)
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
for filename in out.splitlines():
# Compensate for locale-related problems in gtar output (octal unicode representation) #11348
# filename = filename.decode('string_escape')
filename = codecs.escape_decode(filename)[0]
if filename and filename not in self.excludes:
self._files_in_archive.append(to_native(filename))
return self._files_in_archive
def is_unarchived(self):
cmd = [ self.cmd_path, '--diff', '-C', self.dest ]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend([ '--show-transformed-names' ] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
# Check whether the differences are in something that we're
# setting anyway
# What is different
unarchived = True
old_out = out
out = ''
run_uid = os.getuid()
# When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
# Only way to be sure is to check request with what is on disk (as we do for zip)
# Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
for line in old_out.splitlines() + err.splitlines():
# FIXME: Remove the bogus lines from error-output as well !
# Ignore bogus errors on empty filenames (when using --strip-components)
if EMPTY_FILE_RE.search(line):
continue
if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
out += line + '\n'
if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
out += line + '\n'
if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
out += line + '\n'
if MOD_TIME_DIFF_RE.search(line):
out += line + '\n'
if MISSING_FILE_RE.search(line):
out += line + '\n'
if out:
unarchived = False
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
def unarchive(self):
cmd = [ self.cmd_path, '--extract', '-C', self.dest ]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend([ '--show-transformed-names' ] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
return False, 'Commands "gtar" and "tar" not found.'
if self.tar_type != 'gnu':
return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
try:
if self.files_in_archive:
return True, None
except UnarchiveError:
return False, 'Command "%s" could not handle archive.' % self.cmd_path
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
return False, 'Command "%s" found no files in archive.' % self.cmd_path
# class to handle tar files that aren't compressed
class TarArchive(TgzArchive):
def __init__(self, src, dest, file_args, module):
super(TarArchive, self).__init__(src, dest, file_args, module)
# argument to tar
self.zipflag = ''
# class to handle bzip2 compressed tar files
class TarBzipArchive(TgzArchive):
def __init__(self, src, dest, file_args, module):
super(TarBzipArchive, self).__init__(src, dest, file_args, module)
self.zipflag = '-j'
# class to handle xz compressed tar files
class TarXzArchive(TgzArchive):
def __init__(self, src, dest, file_args, module):
super(TarXzArchive, self).__init__(src, dest, file_args, module)
self.zipflag = '-J'
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive]
reasons = set()
for handler in handlers:
obj = handler(src, dest, file_args, module)
(can_handle, reason) = obj.can_handle_archive()
if can_handle:
return obj
reasons.add(reason)
reason_msg = ' '.join(reasons)
module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
src = dict(required=True, type='path'),
original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack
dest = dict(required=True, type='path'),
copy = dict(required=False, default=True, type='bool'),
remote_src = dict(required=False, default=False, type='bool'),
creates = dict(required=False, type='path'),
list_files = dict(required=False, default=False, type='bool'),
keep_newer = dict(required=False, default=False, type='bool'),
exclude = dict(required=False, default=[], type='list'),
extra_opts = dict(required=False, default=[], type='list'),
validate_certs = dict(required=False, default=True, type='bool'),
),
add_file_common_args = True,
mutually_exclusive = [("copy", "remote_src"),],
# check-mode only works for zip files, we cover that later
supports_check_mode = True,
)
src = module.params['src']
dest = module.params['dest']
copy = module.params['copy']
remote_src = module.params['remote_src']
file_args = module.load_file_common_arguments(module.params)
# did tar file arrive?
if not os.path.exists(src):
if not remote_src and copy:
module.fail_json(msg="Source '%s' failed to transfer" % src)
# If copy=false, and src= contains ://, try to download the file to a temp directory.
elif '://' in src:
tempdir = os.path.dirname(os.path.realpath(__file__))
package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))
try:
rsp, info = fetch_url(module, src)
# If download fails, raise a proper exception
if rsp is None:
raise Exception(info['msg'])
# open in binary mode for python3
f = open(package, 'wb')
# Read BUFSIZE (64k) bytes at a time to save on RAM
while True:
data = rsp.read(BUFSIZE)
data = to_bytes(data, errors='surrogate_or_strict')
if len(data) < 1:
break # End of file, break while loop
f.write(data)
f.close()
src = package
except Exception:
e = get_exception()
module.fail_json(msg="Failure downloading %s, %s" % (src, e))
else:
module.fail_json(msg="Source '%s' does not exist" % src)
if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src)
# skip working with 0 size archives
try:
if os.path.getsize(src) == 0:
module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
except Exception:
e = get_exception()
module.fail_json(msg="Source '%s' not readable" % src)
# is dest OK to receive tar file?
if not os.path.isdir(dest):
module.fail_json(msg="Destination '%s' is not a directory" % dest)
handler = pick_handler(src, dest, file_args, module)
res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
# do we need to do unpack?
check_results = handler.is_unarchived()
# DEBUG
# res_args['check_results'] = check_results
if module.check_mode:
res_args['changed'] = not check_results['unarchived']
elif check_results['unarchived']:
res_args['changed'] = False
else:
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
else:
res_args['changed'] = True
# Get diff if required
if check_results.get('diff', False):
res_args['diff'] = { 'prepared': check_results['diff'] }
# Run only if we found differences (idempotence) or diff was missing
if res_args.get('diff', True) and not module.check_mode:
# do we need to change perms?
for filename in handler.files_in_archive:
file_args['path'] = os.path.join(dest, filename)
try:
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
except (IOError, OSError):
e = get_exception()
module.fail_json(msg="Unexpected error when accessing exploded file: %s" % e, **res_args)
if module.params['list_files']:
res_args['files'] = handler.files_in_archive
module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils._text import to_native
if __name__ == '__main__':
main()
| gpl-3.0 |