repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ringo-framework/ringo_news | ringo_news/__init__.py | 1 | 1241 | import logging
from pyramid.i18n import TranslationStringFactory
from ringo.resources import get_resource_factory
from ringo.lib.i18n import translators
from ringo.lib.extension import register_modul
from ringo.lib.helpers import get_action_routename
# Import models so that alembic is able to autogenerate migrations
# scripts.
from ringo_news.model import News
log = logging.getLogger(__name__)
# Static configuration describing the "news" modul to ringo's extension
# registry (consumed by register_modul() in includeme() below).
modul_config = {
    "name": "news",                         # internal modul name
    "clazzpath": "ringo_news.model.News",   # dotted path to the model class
    "label": "News",
    "label_plural": "News",                 # "News" is already plural
    "str_repr": "%s|subject",               # items are represented by their subject
    "display": "admin-menu",                # where the modul appears in the UI
    "actions": ["list", "read", "update", "create", "delete"]  # enabled CRUD actions
}
def includeme(config):
    """Registers a new modul for ringo.

    :config: Pyramid ``Configurator`` instance used to register the
        modul, its translations and its REST routes.
    """
    modul = register_modul(config, modul_config)
    # register_modul may return None (e.g. when the modul is already
    # present); only wire up the extension when registration succeeded.
    if modul:
        News._modul_id = modul.get_value("id")
        # Make this extension's translations available to ringo.
        translators.append(TranslationStringFactory('ringo_news'))
        config.add_translation_dirs('ringo_news:locale/')
        # REST endpoint to mark a single news item as read.
        config.add_route(get_action_routename(News, 'markasread', prefix="rest"),
                         'rest/news/{id}/markasread',
                         factory=get_resource_factory(News))
        config.scan()
| gpl-3.0 |
aleksandr-bakanov/astropy | astropy/units/format/generic.py | 3 | 18514 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import os
import re
import warnings
import sys
from fractions import Fraction
import unicodedata
from . import core, utils
from .base import Base
from astropy.utils import classproperty
from astropy.utils.misc import did_you_mean
def _is_ascii(s):
if sys.version_info >= (3, 7, 0):
return s.isascii()
else:
try:
s.encode('ascii')
return True
except UnicodeEncodeError:
return False
def _to_string(cls, unit):
    # Render *unit* as a generic unit string on behalf of the formatter
    # class *cls* (shared by Generic and Unscaled via ``to_string``).
    if isinstance(unit, core.CompositeUnit):
        parts = []

        # Scale factor first (e.g. "1e-07"), unless hidden or trivial.
        if cls._show_scale and unit.scale != 1:
            parts.append(f'{unit.scale:g}')

        if len(unit.bases):
            positives, negatives = utils.get_grouped_by_powers(
                unit.bases, unit.powers)
            if len(positives):
                parts.append(cls._format_unit_list(positives))
            elif len(parts) == 0:
                # Only negative powers and no scale: emit a leading "1"
                # so the result stays a valid expression ("1 / s").
                parts.append('1')

            if len(negatives):
                parts.append('/')
                unit_list = cls._format_unit_list(negatives)
                if len(negatives) == 1:
                    parts.append(f'{unit_list}')
                else:
                    # Parenthesize multiple divisors, e.g. "1 / (m s)".
                    parts.append(f'({unit_list})')

        return ' '.join(parts)
    elif isinstance(unit, core.NamedUnit):
        return cls._get_unit_name(unit)
    # NOTE(review): any other unit type falls through and returns None —
    # presumably callers only ever pass CompositeUnit or NamedUnit; confirm.
class Generic(Base):
    """
    A "generic" format.

    The syntax of the format is based directly on the FITS standard,
    but instead of only supporting the units that FITS knows about, it
    supports any unit available in the `astropy.units` namespace.
    """

    # Subclasses (e.g. Unscaled) can hide the numerical scale factor.
    _show_scale = True

    # Token names shared by the ply lexer and parser defined below.
    _tokens = (
        'DOUBLE_STAR',
        'STAR',
        'PERIOD',
        'SOLIDUS',
        'CARET',
        'OPEN_PAREN',
        'CLOSE_PAREN',
        'FUNCNAME',
        'UNIT',
        'SIGN',
        'UINT',
        'UFLOAT'
    )

    @classproperty(lazy=True)
    def _all_units(cls):
        # Tuple of (units, deprecated_units, functions); computed once
        # per class thanks to the lazy classproperty.
        return cls._generate_unit_names()

    @classproperty(lazy=True)
    def _units(cls):
        return cls._all_units[0]

    @classproperty(lazy=True)
    def _deprecated_units(cls):
        return cls._all_units[1]

    @classproperty(lazy=True)
    def _functions(cls):
        return cls._all_units[2]

    @classproperty(lazy=True)
    def _parser(cls):
        return cls._make_parser()

    @classproperty(lazy=True)
    def _lexer(cls):
        return cls._make_lexer()

    @classmethod
    def _make_lexer(cls):
        # Build (or load the pre-generated table for) the ply lexer.
        from astropy.extern.ply import lex

        tokens = cls._tokens

        t_STAR = r'\*'
        t_PERIOD = r'\.'
        t_SOLIDUS = r'/'
        t_DOUBLE_STAR = r'\*\*'
        t_CARET = r'\^'
        t_OPEN_PAREN = r'\('
        t_CLOSE_PAREN = r'\)'

        # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
        # Regular expression rules for simple tokens

        def t_UFLOAT(t):
            r'((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?'
            # Downgrade to UINT when the matched text is really an
            # integer (no exponent / fractional part).
            if not re.search(r'[eE\.]', t.value):
                t.type = 'UINT'
                t.value = int(t.value)
            elif t.value.endswith('.'):
                t.type = 'UINT'
                t.value = int(t.value[:-1])
            else:
                t.value = float(t.value)
            return t

        def t_UINT(t):
            r'\d+'
            t.value = int(t.value)
            return t

        def t_SIGN(t):
            r'[+-](?=\d)'
            # Store the sign as +1/-1 so it can be multiplied in later.
            t.value = int(t.value + '1')
            return t

        # This needs to be a function so we can force it to happen
        # before t_UNIT
        def t_FUNCNAME(t):
            r'((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()'
            return t

        def t_UNIT(t):
            "%|([YZEPTGMkhdcmu\N{MICRO SIGN}npfazy]?'((?!\\d)\\w)+')|((?!\\d)\\w)+"
            t.value = cls._get_unit(t)
            return t

        t_ignore = ' '

        # Error handling rule
        def t_error(t):
            raise ValueError(
                f"Invalid character at col {t.lexpos}")

        # Detect whether the generated table module already exists so we
        # know whether a license header must be added after generation.
        lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
                                      'generic_lextab.py'))

        lexer = lex.lex(optimize=True, lextab='generic_lextab',
                        outputdir=os.path.dirname(__file__),
                        reflags=int(re.UNICODE))

        if not lexer_exists:
            cls._add_tab_header('generic_lextab')

        return lexer

    @classmethod
    def _make_parser(cls):
        """
        The grammar here is based on the description in the `FITS
        standard
        <http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
        Section 4.3, which is not terribly precise. The exact grammar
        is here is based on the YACC grammar in the `unity library
        <https://bitbucket.org/nxg/unity/>`_.

        This same grammar is used by the `"fits"` and `"vounit"`
        formats, the only difference being the set of available unit
        strings.
        """
        from astropy.extern.ply import yacc

        tokens = cls._tokens

        def p_main(p):
            '''
            main : product_of_units
                 | factor product_of_units
                 | factor product product_of_units
                 | division_product_of_units
                 | factor division_product_of_units
                 | factor product division_product_of_units
                 | inverse_unit
                 | factor inverse_unit
                 | factor product inverse_unit
                 | factor
            '''
            from astropy.units.core import Unit
            if len(p) == 2:
                p[0] = Unit(p[1])
            elif len(p) == 3:
                p[0] = Unit(p[1] * p[2])
            elif len(p) == 4:
                p[0] = Unit(p[1] * p[3])

        def p_division_product_of_units(p):
            '''
            division_product_of_units : division_product_of_units division product_of_units
                                      | product_of_units
            '''
            from astropy.units.core import Unit
            if len(p) == 4:
                p[0] = Unit(p[1] / p[3])
            else:
                p[0] = p[1]

        def p_inverse_unit(p):
            '''
            inverse_unit : division unit_expression
            '''
            p[0] = p[2] ** -1

        def p_factor(p):
            '''
            factor : factor_fits
                   | factor_float
                   | factor_int
            '''
            p[0] = p[1]

        def p_factor_float(p):
            '''
            factor_float : signed_float
                         | signed_float UINT signed_int
                         | signed_float UINT power numeric_power
            '''
            if cls.name == 'fits':
                raise ValueError("Numeric factor not supported by FITS")
            if len(p) == 4:
                p[0] = p[1] * p[2] ** float(p[3])
            elif len(p) == 5:
                p[0] = p[1] * p[2] ** float(p[4])
            elif len(p) == 2:
                p[0] = p[1]

        def p_factor_int(p):
            '''
            factor_int : UINT
                       | UINT signed_int
                       | UINT power numeric_power
                       | UINT UINT signed_int
                       | UINT UINT power numeric_power
            '''
            if cls.name == 'fits':
                raise ValueError("Numeric factor not supported by FITS")
            if len(p) == 2:
                p[0] = p[1]
            elif len(p) == 3:
                p[0] = p[1] ** float(p[2])
            elif len(p) == 4:
                if isinstance(p[2], int):
                    p[0] = p[1] * p[2] ** float(p[3])
                else:
                    p[0] = p[1] ** float(p[3])
            elif len(p) == 5:
                p[0] = p[1] * p[2] ** p[4]

        def p_factor_fits(p):
            '''
            factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
                        | UINT power OPEN_PAREN UINT CLOSE_PAREN
                        | UINT power signed_int
                        | UINT power UINT
                        | UINT SIGN UINT
                        | UINT OPEN_PAREN signed_int CLOSE_PAREN
            '''
            # FITS-style scale factors are only powers of 10.
            if p[1] != 10:
                if cls.name == 'fits':
                    raise ValueError("Base must be 10")
                else:
                    return
            if len(p) == 4:
                if p[2] in ('**', '^'):
                    p[0] = 10 ** p[3]
                else:
                    p[0] = 10 ** (p[2] * p[3])
            elif len(p) == 5:
                p[0] = 10 ** p[3]
            elif len(p) == 6:
                p[0] = 10 ** p[4]

        def p_product_of_units(p):
            '''
            product_of_units : unit_expression product product_of_units
                             | unit_expression product_of_units
                             | unit_expression
            '''
            if len(p) == 2:
                p[0] = p[1]
            elif len(p) == 3:
                p[0] = p[1] * p[2]
            else:
                p[0] = p[1] * p[3]

        def p_unit_expression(p):
            '''
            unit_expression : function
                            | unit_with_power
                            | OPEN_PAREN product_of_units CLOSE_PAREN
            '''
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = p[2]

        def p_unit_with_power(p):
            '''
            unit_with_power : UNIT power numeric_power
                            | UNIT numeric_power
                            | UNIT
            '''
            if len(p) == 2:
                p[0] = p[1]
            elif len(p) == 3:
                p[0] = p[1] ** p[2]
            else:
                p[0] = p[1] ** p[3]

        def p_numeric_power(p):
            '''
            numeric_power : sign UINT
                          | OPEN_PAREN paren_expr CLOSE_PAREN
            '''
            if len(p) == 3:
                p[0] = p[1] * p[2]
            elif len(p) == 4:
                p[0] = p[2]

        def p_paren_expr(p):
            '''
            paren_expr : sign UINT
                       | signed_float
                       | frac
            '''
            if len(p) == 3:
                p[0] = p[1] * p[2]
            else:
                p[0] = p[1]

        def p_frac(p):
            '''
            frac : sign UINT division sign UINT
            '''
            # Keep fractional powers exact (e.g. m**(1/3)).
            p[0] = Fraction(p[1] * p[2], p[4] * p[5])

        def p_sign(p):
            '''
            sign : SIGN
                 |
            '''
            # An absent sign defaults to +1.
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1

        def p_product(p):
            '''
            product : STAR
                    | PERIOD
            '''
            pass

        def p_division(p):
            '''
            division : SOLIDUS
            '''
            pass

        def p_power(p):
            '''
            power : DOUBLE_STAR
                  | CARET
            '''
            p[0] = p[1]

        def p_signed_int(p):
            '''
            signed_int : SIGN UINT
            '''
            p[0] = p[1] * p[2]

        def p_signed_float(p):
            '''
            signed_float : sign UINT
                         | sign UFLOAT
            '''
            p[0] = p[1] * p[2]

        def p_function_name(p):
            '''
            function_name : FUNCNAME
            '''
            p[0] = p[1]

        def p_function(p):
            '''
            function : function_name OPEN_PAREN main CLOSE_PAREN
            '''
            if p[1] == 'sqrt':
                p[0] = p[3] ** 0.5
                return
            elif p[1] in ('mag', 'dB', 'dex'):
                function_unit = cls._parse_unit(p[1])
                # In Generic, this is callable, but that does not have to
                # be the case in subclasses (e.g., in VOUnit it is not).
                if callable(function_unit):
                    p[0] = function_unit(p[3])
                    return

            raise ValueError("'{}' is not a recognized function".format(p[1]))

        def p_error(p):
            raise ValueError()

        # As for the lexer: remember whether the table module must be
        # (re)generated so a license header can be prepended afterwards.
        parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
                                       'generic_parsetab.py'))

        parser = yacc.yacc(debug=False, tabmodule='generic_parsetab',
                           outputdir=os.path.dirname(__file__))

        if not parser_exists:
            cls._add_tab_header('generic_parsetab')

        return parser

    @classmethod
    def _get_unit(cls, t):
        # Lexer helper: resolve the matched token text to a unit object,
        # re-raising with the column position for a better error message.
        try:
            return cls._parse_unit(t.value)
        except ValueError as e:
            raise ValueError(
                "At col {}, {}".format(
                    t.lexpos, str(e)))

    @classmethod
    def _parse_unit(cls, s, detailed_exception=True):
        """Look a single unit name *s* up in the current unit registry.

        When *detailed_exception* is True, unknown names raise a
        ValueError with a "did you mean" suggestion; otherwise a bare
        ValueError is raised (cheaper, used by the parsing fast path).
        """
        registry = core.get_current_unit_registry().registry
        if s == '%':
            return registry['percent']

        if not _is_ascii(s):
            # Normalize common unicode spellings to their registry names.
            if s[0] == '\N{MICRO SIGN}':
                s = 'u' + s[1:]
            if s[-1] == '\N{GREEK CAPITAL LETTER OMEGA}':
                s = s[:-1] + 'Ohm'
            elif s[-1] == '\N{LATIN CAPITAL LETTER A WITH RING ABOVE}':
                s = s[:-1] + 'Angstrom'

        if s in registry:
            return registry[s]

        if detailed_exception:
            raise ValueError(
                '{} is not a valid unit. {}'.format(
                    s, did_you_mean(s, registry)))
        else:
            raise ValueError()

    _translations = str.maketrans({
        '\N{GREEK SMALL LETTER MU}': '\N{MICRO SIGN}',
        '\N{MINUS SIGN}': '-',
    })
    """Character translations that should be applied before parsing a string.

    Note that this does explicitly *not* generally translate MICRO SIGN to u,
    since then a string like 'µ' would be interpreted as unit mass.
    """

    _superscripts = (
        '\N{SUPERSCRIPT MINUS}'
        '\N{SUPERSCRIPT PLUS SIGN}'
        '\N{SUPERSCRIPT ZERO}'
        '\N{SUPERSCRIPT ONE}'
        '\N{SUPERSCRIPT TWO}'
        '\N{SUPERSCRIPT THREE}'
        '\N{SUPERSCRIPT FOUR}'
        '\N{SUPERSCRIPT FIVE}'
        '\N{SUPERSCRIPT SIX}'
        '\N{SUPERSCRIPT SEVEN}'
        '\N{SUPERSCRIPT EIGHT}'
        '\N{SUPERSCRIPT NINE}'
    )

    # Maps each superscript character to its plain ASCII counterpart.
    _superscript_translations = str.maketrans(_superscripts, '-+0123456789')
    _regex_superscript = re.compile(f'[{_superscripts}]+')
    _regex_deg = re.compile('°([CF])?')

    @classmethod
    def _convert_superscript(cls, m):
        # Regex callback: "m²" -> "m(2)" so superscripts become powers.
        return '({})'.format(
            m.group().translate(cls._superscript_translations)
        )

    @classmethod
    def _convert_deg(cls, m):
        # Regex callback: "°" -> "deg", "°C"/"°F" -> "deg_C"/"deg_F".
        if len(m.string) == 1:
            return 'deg'
        return m.string.replace('°', 'deg_')

    @classmethod
    def parse(cls, s, debug=False):
        """Parse the unit string *s* and return the unit it describes."""
        if not isinstance(s, str):
            s = s.decode('ascii')
        elif not _is_ascii(s):
            # common normalization of unicode strings to avoid
            # having to deal with multiple representations of
            # the same character. This normalizes to "composed" form
            # and will e.g. convert OHM SIGN to GREEK CAPITAL LETTER OMEGA
            s = unicodedata.normalize('NFC', s)
            # Translate some basic unicode items that we'd like to support on
            # input but are not standard.
            s = s.translate(cls._translations)

            # TODO: might the below be better done in the parser/lexer?
            # Translate superscripts to parenthesized numbers; this ensures
            # that mixes of superscripts and regular numbers fail.
            s = cls._regex_superscript.sub(cls._convert_superscript, s)
            # Translate possible degrees.
            s = cls._regex_deg.sub(cls._convert_deg, s)

        result = cls._do_parse(s, debug=debug)
        # Check for excess solidi, but exclude fractional exponents (accepted)
        n_slashes = s.count('/')
        if n_slashes > 1 and (n_slashes - len(re.findall(r'\(\d+/\d+\)', s))) > 1:
            warnings.warn(
                "'{}' contains multiple slashes, which is "
                "discouraged by the FITS standard".format(s),
                core.UnitsWarning)
        return result

    @classmethod
    def _do_parse(cls, s, debug=False):
        try:
            # This is a short circuit for the case where the string
            # is just a single unit name
            return cls._parse_unit(s, detailed_exception=False)
        except ValueError as e:
            try:
                return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
            except ValueError as e:
                # Only re-raise when the parser produced a message of
                # its own; otherwise substitute a generic syntax error.
                if str(e):
                    raise
                else:
                    raise ValueError(f"Syntax error parsing unit '{s}'")

    @classmethod
    def _get_unit_name(cls, unit):
        return unit.get_format_name('generic')

    @classmethod
    def _format_unit_list(cls, units):
        """Format a list of ``(unit, power)`` pairs as a space-joined string."""
        out = []
        # Sort case-insensitively by unit name for a stable, readable order.
        units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())

        for base, power in units:
            if power == 1:
                out.append(cls._get_unit_name(base))
            else:
                power = utils.format_power(power)
                if '/' in power or '.' in power:
                    # Fractional/decimal powers need parentheses, e.g. m(2/3).
                    out.append('{}({})'.format(
                        cls._get_unit_name(base), power))
                else:
                    out.append('{}{}'.format(
                        cls._get_unit_name(base), power))
        return ' '.join(out)

    @classmethod
    def to_string(cls, unit):
        return _to_string(cls, unit)
class Unscaled(Generic):
    """
    A format that doesn't display the scale part of the unit, other
    than that, it is identical to the `Generic` format.

    This is used in some error messages where the scale is irrelevant.
    """
    # Inherit all behavior from Generic; only suppress the scale factor.
    _show_scale = False
| bsd-3-clause |
victoredwardocallaghan/pygments-main | pygments/styles/xcode.py | 126 | 1501 | # -*- coding: utf-8 -*-
"""
pygments.styles.xcode
~~~~~~~~~~~~~~~~~~~~~
Style similar to the `Xcode` default theme.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Literal
class XcodeStyle(Style):
    """
    Style similar to the Xcode default colouring theme.
    """
    # No extra CSS beyond the per-token colours below.
    default_style = ''

    styles = {
        Comment: '#177500',
        Comment.Preproc: '#633820',

        String: '#C41A16',
        String.Char: '#2300CE',

        Operator: '#000000',

        Keyword: '#A90D91',

        Name: '#000000',
        Name.Attribute: '#836C28',
        Name.Class: '#3F6E75',
        Name.Function: '#000000',
        Name.Builtin: '#A90D91',
        # In Obj-C code this token is used to colour Cocoa types
        Name.Builtin.Pseudo: '#5B269A',
        Name.Variable: '#000000',
        Name.Tag: '#000000',
        Name.Decorator: '#000000',
        # Workaround for a BUG here: lexer treats multiline method signatures as labels
        Name.Label: '#000000',

        Literal: '#1C01CE',
        Number: '#1C01CE',
        Error: '#000000',
    }
| bsd-2-clause |
geniusgordon/NTHUOJ_web | index/views.py | 4 | 5845 | '''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import time
import random
import autocomplete_light
from django.http import Http404
from django.utils import timezone
from utils.log_info import get_logger
from contest.models import Contest
from django.http import HttpResponse
from datetime import datetime, timedelta
from index.models import Announcement
from users.models import User, Notification
from django.shortcuts import render, redirect
from utils.render_helper import render_index
from django.template import RequestContext
from utils.user_info import validate_user
from django.core.urlresolvers import reverse
from django.template import RequestContext
from index.forms import AnnouncementCreationForm
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from problem.models import Problem
from group.models import Group
# Create your views here.
logger = get_logger()
def index(request, alert_info='none'):
    """Front page: show running contests, contests starting within a day,
    and currently active announcements.

    ``alert_info`` is an optional message key forwarded to the template.
    """
    present = timezone.now()
    # Bug fix: the threshold was previously built with naive
    # ``datetime.now()`` while every other comparison here uses the
    # (potentially timezone-aware) ``timezone.now()``.  Mixing naive and
    # aware datetimes is incorrect when USE_TZ is enabled; deriving the
    # threshold from ``present`` keeps both values consistent.
    time_threshold = present + timedelta(days=1)
    c_runnings = Contest.objects.filter(start_time__lt=present, end_time__gt=present, is_homework=False)
    c_upcomings = Contest.objects.filter(start_time__gt=present, start_time__lt=time_threshold, is_homework=False).order_by('start_time')
    announcements = Announcement.objects.filter(start_time__lt=present, end_time__gt=present)
    return render_index(request, 'index/index.html',
                        {'c_runnings': c_runnings, 'c_upcomings': c_upcomings,
                         'announcements': announcements, 'alert_info': alert_info})
@login_required()
def announcement_create(request):
    """Create a new announcement; restricted to admin users."""
    if not User.has_admin_auth(request.user):
        raise PermissionDenied(
            'User %s does not have the permission!' % str(request.user))
    if request.method == 'POST':
        form = AnnouncementCreationForm(request.POST)
        if form.is_valid():
            announcement = form.save()
            # NOTE(review): ``backend`` is an attribute used by Django's
            # auth login machinery on *user* objects; setting it on an
            # Announcement looks like a copy/paste leftover — confirm
            # and remove if unused.
            announcement.backend = 'django.contrib.auth.backends.ModelBackend'
            return redirect(reverse('index:index'))
    else:
        form = AnnouncementCreationForm()
    # On GET, or on an invalid POST, re-render the (possibly bound) form.
    return render_index(request, 'index/announcement.html',
                        {'form': form, 'title': 'Create Announcement'})
@login_required()
def announcement_update(request, aid):
    """Edit the announcement with id *aid*; restricted to admin users."""
    if not User.has_admin_auth(request.user):
        raise PermissionDenied(
            'User %s does not have the permission' % str(request.user))
    try:
        # NOTE: ``long`` only exists on Python 2; this module predates a
        # Python 3 port.
        announcement = Announcement.objects.get(id=long(aid))
    except Announcement.DoesNotExist:
        # NOTE(review): ``Http404`` is imported by this module and never
        # used; raising it here instead of a bare Exception would yield a
        # proper 404 page — confirm intent before changing.
        raise Exception('Announcement %ld does not exist' % long(aid))
    if request.method == 'POST':
        form = AnnouncementCreationForm(request.POST, instance=announcement)
        if form.is_valid():
            updating = form.save()
            # NOTE(review): same suspicious ``backend`` assignment as in
            # announcement_create — likely a copy/paste leftover.
            updating.backend = 'django.contrib.auth.backends.ModelBackend'
            return redirect(reverse('index:index'))
    else:
        form = AnnouncementCreationForm(instance=announcement)
    return render_index(request, 'index/announcement.html',
                        {'form': form, 'announcement': announcement, 'title': 'Update Announcement'})
@login_required()
def announcement_delete(request, aid):
    """Delete the announcement with id *aid*; restricted to admin users."""
    user = request.user
    if not User.has_admin_auth(user):
        raise PermissionDenied(
            'User %s does not have the permission' % str(user))
    announcement_id = long(aid)
    try:
        Announcement.objects.get(id=announcement_id).delete()
    except Announcement.DoesNotExist:
        raise Exception('Announcement %ld does not exist' % announcement_id)
    return redirect(reverse('index:index'))
def navigation_autocomplete(request):
    """Return search results for the navigation-bar autocomplete box.

    Looks up users, visible problems, already-started contests and groups
    whose name (or id) matches the ``q`` GET parameter.
    """
    # Bug fix: this view used naive ``datetime.now()`` while the rest of
    # this module compares model datetimes against ``timezone.now()``.
    # With USE_TZ enabled, comparing a naive value against aware column
    # values is incorrect; use the timezone-aware clock consistently.
    now = timezone.now()
    q = request.GET.get('q', '')
    queries = {}
    # Each result set is sliced so the dropdown stays small.
    queries['users'] = User.objects.filter(
        username__istartswith=q
    )[:5]
    queries['problems'] = Problem.objects.filter(
        Q(visible=True) & (Q(pname__icontains=q) | Q(id__contains=q))
    )[:10]
    queries['contests'] = Contest.objects.filter(
        Q(start_time__lt=now) & (Q(cname__icontains=q) | Q(id__contains=q))
    )[:5]
    queries['groups'] = Group.objects.filter(
        Q(gname__icontains=q) | Q(id__contains=q)
    )[:5]
    return render(request, 'index/navigation_autocomplete.html', queries)
def custom_400(request):
    """Render the custom "400 Bad Request" error page."""
    return render(request, 'index/400.html', status=400)
def custom_403(request):
    """Render the custom "403 Forbidden" error page."""
    return render(request, 'index/403.html', status=403)
def custom_404(request):
    """Render the custom "404 Not Found" error page."""
    return render(request, 'index/404.html', status=404)
def custom_500(request):
    """Render the custom "500 Internal Server Error" page."""
    return render(request, 'index/500.html', {'error_message': 'error'}, status=500)
def base(request):
    """Render the bare base template (used for layout debugging/preview)."""
    return render_index(request, 'index/base.html', {})
| mit |
resmo/ansible | lib/ansible/modules/cloud/openstack/os_user.py | 24 | 10053 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_user
short_description: Manage OpenStack Identity Users
extends_documentation_fragment: openstack
author: David Shrewsbury (@Shrews)
version_added: "2.0"
description:
- Manage OpenStack Identity users. Users can be created,
updated or deleted using this module. A user will be updated
if I(name) matches an existing user and I(state) is present.
The value for I(name) cannot be updated without deleting and
re-creating the user.
options:
name:
description:
- Username for the user
required: true
password:
description:
- Password for the user
update_password:
required: false
choices: ['always', 'on_create']
version_added: "2.3"
description:
- C(always) will attempt to update password. C(on_create) will only
set the password for newly created users.
email:
description:
- Email address for the user
description:
description:
- Description about the user
version_added: "2.4"
default_project:
description:
- Project name or ID that the user should be associated with by default
domain:
description:
- Domain to create the user in if the cloud supports domains
enabled:
description:
- Is the user enabled
type: bool
default: 'yes'
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a user
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
email: demo@example.com
domain: default
default_project: demo
# Delete a user
- os_user:
cloud: mycloud
state: absent
name: demouser
# Create a user but don't update password if user exists
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
update_password: on_create
email: demo@example.com
domain: default
default_project: demo
# Create a user without password
- os_user:
cloud: mycloud
state: present
name: demouser
email: demo@example.com
domain: default
default_project: demo
'''
RETURN = '''
user:
description: Dictionary describing the user.
returned: On success when I(state) is 'present'
type: complex
contains:
default_project_id:
description: User default project ID. Only present with Keystone >= v3.
type: str
sample: "4427115787be45f08f0ec22a03bfc735"
domain_id:
description: User domain ID. Only present with Keystone >= v3.
type: str
sample: "default"
email:
description: User email address
type: str
sample: "demo@example.com"
id:
description: User ID
type: str
sample: "f59382db809c43139982ca4189404650"
name:
description: User name
type: str
sample: "demouser"
'''
from distutils.version import StrictVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(params_dict, user):
for k in params_dict:
if k not in ('password', 'update_password') and user[k] != params_dict[k]:
return True
# We don't get password back in the user object, so assume any supplied
# password is a change.
if (params_dict['password'] is not None and
params_dict['update_password'] == 'always'):
return True
return False
def _get_domain_id(cloud, domain):
    """Best-effort resolution of *domain* (an id or a name) to a domain id.

    The broad excepts are deliberate: admin-only APIs may fail for
    non-admin users, in which case we fall back step by step.
    """
    try:
        # We assume admin is passing domain id
        domain_id = cloud.get_domain(domain)['id']
    except Exception:
        # If we fail, maybe admin is passing a domain name.
        # Note that domains have unique names, just like id.
        try:
            domain_id = cloud.search_domains(filters={'name': domain})[0]['id']
        except Exception:
            # Ok, let's hope the user is non-admin and passing a sane id
            domain_id = domain
    return domain_id
def _get_default_project_id(cloud, default_project, domain_id, module):
project = cloud.get_project(default_project, domain_id=domain_id)
if not project:
module.fail_json(msg='Default project %s is not valid' % default_project)
return project['id']
def main():
    """Entry point for the os_user Ansible module: ensure an OpenStack
    identity user is present (created/updated) or absent."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        password=dict(required=False, default=None, no_log=True),
        email=dict(required=False, default=None),
        default_project=dict(required=False, default=None),
        description=dict(type='str'),
        domain=dict(required=False, default=None),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
        update_password=dict(default=None, choices=['always', 'on_create']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        **module_kwargs)

    name = module.params['name']
    password = module.params.get('password')
    email = module.params['email']
    default_project = module.params['default_project']
    domain = module.params['domain']
    enabled = module.params['enabled']
    state = module.params['state']
    update_password = module.params['update_password']
    description = module.params['description']

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        domain_id = None
        if domain:
            domain_id = _get_domain_id(cloud, domain)
            user = cloud.get_user(name, domain_id=domain_id)
        else:
            user = cloud.get_user(name)

        # Refactor: the original duplicated every create_user/update_user
        # call with and without ``description``.  Passing it through a
        # conditional kwargs dict keeps the behavior identical (the key
        # is only supplied when the caller actually set it) without the
        # four near-identical call sites.
        desc_kwargs = {} if description is None else {'description': description}

        if state == 'present':
            if update_password in ('always', 'on_create'):
                if not password:
                    msg = "update_password is %s but a password value is missing" % update_password
                    module.fail_json(msg=msg)
            default_project_id = None
            if default_project:
                default_project_id = _get_default_project_id(
                    cloud, default_project, domain_id, module)

            if user is None:
                user = cloud.create_user(
                    name=name, password=password, email=email,
                    default_project=default_project_id, domain_id=domain_id,
                    enabled=enabled, **desc_kwargs)
                changed = True
            else:
                params_dict = {'email': email, 'enabled': enabled,
                               'password': password,
                               'update_password': update_password}
                if description is not None:
                    params_dict['description'] = description
                if domain_id is not None:
                    params_dict['domain_id'] = domain_id
                if default_project_id is not None:
                    params_dict['default_project_id'] = default_project_id

                if _needs_update(params_dict, user):
                    if update_password == 'always':
                        user = cloud.update_user(
                            user['id'], password=password, email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled, **desc_kwargs)
                    else:
                        # Leave the password untouched unless updates are forced.
                        user = cloud.update_user(
                            user['id'], email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled, **desc_kwargs)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, user=user)

        elif state == 'absent':
            if user is None:
                changed = False
            else:
                if domain:
                    cloud.delete_user(user['id'], domain_id=domain_id)
                else:
                    cloud.delete_user(user['id'])
                changed = True
            module.exit_json(changed=changed)

    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)


if __name__ == '__main__':
    main()
| gpl-3.0 |
etzhou/edx-platform | lms/djangoapps/lti_provider/migrations/0002_create_lti_outcome_management.py | 84 | 8078 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name, missing-docstring, unused-argument, unused-import, line-too-long
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the LTI outcome-management tables."""
        # Adding model 'OutcomeService'
        db.create_table('lti_provider_outcomeservice', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('lis_outcome_service_url', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('lti_consumer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lti_provider.LtiConsumer'])),
        ))
        db.send_create_signal('lti_provider', ['OutcomeService'])

        # Adding model 'GradedAssignment'
        db.create_table('lti_provider_gradedassignment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('course_key', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
            ('usage_key', self.gf('xmodule_django.models.UsageKeyField')(max_length=255, db_index=True)),
            ('outcome_service', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lti_provider.OutcomeService'])),
            ('lis_result_sourcedid', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
        ))
        db.send_create_signal('lti_provider', ['GradedAssignment'])

        # Adding unique constraint on 'GradedAssignment', fields ['outcome_service', 'lis_result_sourcedid']
        db.create_unique('lti_provider_gradedassignment', ['outcome_service_id', 'lis_result_sourcedid'])

        # Adding field 'LtiConsumer.instance_guid'
        db.add_column('lti_provider_lticonsumer', 'instance_guid',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
                      keep_default=False)

        # Adding unique constraint on 'LtiConsumer', fields ['consumer_name']
        db.create_unique('lti_provider_lticonsumer', ['consumer_name'])
def backwards(self, orm):
# Removing unique constraint on 'LtiConsumer', fields ['consumer_name']
db.delete_unique('lti_provider_lticonsumer', ['consumer_name'])
# Removing unique constraint on 'GradedAssignment', fields ['outcome_service', 'lis_result_sourcedid']
db.delete_unique('lti_provider_gradedassignment', ['outcome_service_id', 'lis_result_sourcedid'])
# Deleting model 'OutcomeService'
db.delete_table('lti_provider_outcomeservice')
# Deleting model 'GradedAssignment'
db.delete_table('lti_provider_gradedassignment')
# Deleting field 'LtiConsumer.instance_guid'
db.delete_column('lti_provider_lticonsumer', 'instance_guid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lti_provider.gradedassignment': {
'Meta': {'unique_together': "(('outcome_service', 'lis_result_sourcedid'),)", 'object_name': 'GradedAssignment'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lis_result_sourcedid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'outcome_service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.OutcomeService']"}),
'usage_key': ('xmodule_django.models.UsageKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'lti_provider.lticonsumer': {
'Meta': {'object_name': 'LtiConsumer'},
'consumer_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'consumer_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'consumer_secret': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_guid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'lti_provider.outcomeservice': {
'Meta': {'object_name': 'OutcomeService'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lis_outcome_service_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lti_consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lti_provider.LtiConsumer']"})
}
}
complete_apps = ['lti_provider']
| agpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/libs/liboozie/src/liboozie/types.py | 2 | 17663 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Oozie API classes.
This is mostly just codifying the datastructure of the Oozie REST API.
http://incubator.apache.org/oozie/docs/3.2.0-incubating/docs/WebServicesAPI.html
"""
import re
from cStringIO import StringIO
from time import mktime
from desktop.lib import i18n
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_warn
import hadoop.confparse
from liboozie.utils import parse_timestamp, format_time
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
class Action(object):
    """Base class for Oozie actions built from a raw JSON dictionary.

    Subclasses declare ``_ATTRS`` (the JSON keys to copy onto the
    instance) and may override ``_fixup`` to normalize the raw values.
    """

    def __init__(self, json_dict):
        # Copy each declared attribute from the payload; absent keys become None.
        for name in self._ATTRS:
            setattr(self, name, json_dict.get(name))
        self._fixup()

    def _fixup(self):
        # Hook for subclasses; the base class has nothing to normalize.
        pass

    def is_finished(self):
        """Return True when the action reached a successful terminal status."""
        terminal_states = ('OK', 'SUCCEEDED', 'DONE')
        return self.status in terminal_states

    @classmethod
    def create(cls, action_class, action_dict):
        """Factory: control-flow nodes become ControlFlowAction instances,
        everything else becomes an ``action_class`` instance."""
        if ControlFlowAction.is_control_flow(action_dict.get('type')):
            return ControlFlowAction(action_dict)
        return action_class(action_dict)

    def __str__(self):
        return '%s - %s' % (self.type, self.name)
class ControlFlowAction(Action):
    # Workflow control-flow node (fork/join/switch/...): JSON keys copied
    # verbatim from the Oozie REST payload onto the instance.
    _ATTRS = [
        'errorMessage',
        'status',
        'stats',
        'data',
        'transition',
        'externalStatus',
        'cred',
        'conf',
        'type',
        'endTime',
        'externalId',
        'id',
        'startTime',
        'externalChildIDs',
        'name',
        'errorCode',
        'trackerUri',
        'retries',
        'toString',
        'consoleUrl'
    ]

    @classmethod
    def is_control_flow(self, action_type):
        # Control-flow node types are namespaced with a colon (e.g. "switch:...").
        return action_type is not None and (':' in action_type)

    def _fixup(self):
        """
        Fixup:
          - time fields as struct_time
          - config dict
          - protect externalId
        """
        super(ControlFlowAction, self)._fixup()
        if self.startTime:
            self.startTime = parse_timestamp(self.startTime)
        if self.endTime:
            self.endTime = parse_timestamp(self.endTime)
        if self.retries:
            self.retries = int(self.retries)
        # Only keep external ids that look like Hadoop job ids ("job_*").
        if self.externalId and not re.match('job_.*', self.externalId):
            self.externalId = None
        # Control-flow nodes carry no configuration of their own.
        self.conf_dict = {}
class WorkflowAction(Action):
    """A single action node inside an Oozie workflow job."""

    # JSON keys copied verbatim from the Oozie REST payload.
    _ATTRS = [
        'conf',
        'consoleUrl',
        'data',
        'endTime',
        'errorCode',
        'errorMessage',
        'externalId',
        'externalStatus',
        'id',
        'name',
        'retries',
        'startTime',
        'status',
        'trackerUri',
        'transition',
        'type',
        'externalChildIDs',
    ]

    def _fixup(self):
        """Normalize raw JSON values: timestamps become struct_time, retries
        becomes an int, and the XML configuration becomes a ConfParse dict."""
        super(WorkflowAction, self)._fixup()
        for field in ('startTime', 'endTime'):
            raw = getattr(self, field)
            if raw:
                setattr(self, field, parse_timestamp(raw))
        if self.retries:
            self.retries = int(self.retries)
        if self.conf:
            self.conf_dict = hadoop.confparse.ConfParse(StringIO(i18n.smart_str(self.conf)))
        else:
            self.conf_dict = {}

    def get_absolute_url(self):
        """URL of this action's detail page, carrying any coordinator/bundle
        context along in the query string."""
        params = []
        coordinator = getattr(self, 'oozie_coordinator', None)
        if coordinator:
            params.append('coordinator_job_id=%s' % coordinator.id)
        bundle = getattr(self, 'oozie_bundle', None)
        if bundle:
            params.append('bundle_job_id=%s' % bundle.id)
        suffix = ('?' + '&'.join(params)) if params else ''
        return reverse('oozie:list_oozie_workflow_action', kwargs={'action': self.id}) + suffix

    def get_absolute_log_url(self):
        """Link to the Hadoop job logs, or None when there is no job id."""
        if self.externalId and re.match('job_.*', self.externalId):
            return reverse('jobbrowser.views.job_single_logs', kwargs={'job': self.externalId}) or ''
        return None

    def get_external_id_url(self):
        """Link to the external entity: a sub-workflow page for ids ending in
        'W', or the job browser for Hadoop job ids; None otherwise."""
        if self.externalId:
            if self.externalId.endswith('W'):
                return reverse('oozie:list_oozie_workflow', kwargs={'job_id': self.externalId}) or ''
            if re.match('job_.*', self.externalId):
                return reverse('jobbrowser.views.single_job', kwargs={'job': self.externalId}) or ''
        return None
class CoordinatorAction(Action):
    # One materialized run of a coordinator: JSON keys copied verbatim
    # from the Oozie REST payload onto the instance.
    _ATTRS = [
        'status',
        'runConf',
        'errorMessage',
        'missingDependencies',
        'coordJobId',
        'errorCode',
        'actionNumber',
        'consoleUrl',
        'nominalTime',
        'externalStatus',
        'createdConf',
        'createdTime',
        'externalId',
        'lastModifiedTime',
        'type',
        'id',
        'trackerUri'
    ]

    def _fixup(self):
        """
        Fixup:
          - time fields as struct_time
          - config dict
        """
        super(CoordinatorAction, self)._fixup()
        if self.createdTime:
            self.createdTime = parse_timestamp(self.createdTime)
        if self.nominalTime:
            self.nominalTime = parse_timestamp(self.nominalTime)
        if self.lastModifiedTime:
            self.lastModifiedTime = parse_timestamp(self.lastModifiedTime)
        if self.runConf:
            xml = StringIO(i18n.smart_str(self.runConf))
            self.conf_dict = hadoop.confparse.ConfParse(xml)
        else:
            self.conf_dict = {}
        # Display title: action number plus formatted nominal time.
        self.title = ' %s-%s'% (self.actionNumber, format_time(self.nominalTime))
class BundleAction(Action):
    # A bundle "action" is really a coordinator job: JSON keys copied
    # verbatim from the Oozie REST payload onto the instance.
    _ATTRS = [
        'startTime',
        'actions',
        'frequency',
        'concurrency',
        'pauseTime',
        'group',
        'toString',
        'consoleUrl',
        'mat_throttling',
        'status',
        'conf',
        'user',
        'timeOut',
        'coordJobPath',
        'timeUnit',
        'coordJobId',
        'coordJobName',
        'nextMaterializedTime',
        'coordExternalId',
        'acl',
        'lastAction',
        'executionPolicy',
        'timeZone',
        'endTime'
    ]

    def _fixup(self):
        """
        Fixup:
          - time fields as struct_time
          - config dict
        """
        super(BundleAction, self)._fixup()
        # Present the embedded coordinator as a generic action.
        self.type = 'coord-action'
        self.name = self.coordJobName

        if self.conf:
            xml = StringIO(i18n.smart_str(self.conf))
            self.conf_dict = hadoop.confparse.ConfParse(xml)
        else:
            self.conf_dict = {}

    def get_progress(self):
        """How much more time before the next action."""
        if self.lastAction is None:
            return 0

        # Elapsed fraction of [startTime, endTime] as of the last action, capped at 100.
        next = mktime(parse_timestamp(self.lastAction))
        start = mktime(parse_timestamp(self.startTime))
        end = mktime(parse_timestamp(self.endTime))
        if end != start:
            progress = min(int((1 - (end - next) / (end - start)) * 100), 100)
        else:
            progress = 100
        return progress
class Job(object):
    """
    Base class for an Oozie job (workflow, coordinator or bundle).

    Accessing ``log`` and ``definition`` will trigger Oozie API calls.
    """
    # FIX: this text used to sit *after* MAX_LOG_SIZE as a bare string
    # expression, so it was never assigned to Job.__doc__; it is now a
    # proper class docstring.

    MAX_LOG_SIZE = 3500 * 20  # 20 pages

    def __init__(self, api, json_dict):
        """
        @param api: Oozie API client, used for lazy log/definition fetches
                    and job control calls.
        @param json_dict: raw JSON dictionary describing the job.
        """
        for attr in self._ATTRS:
            setattr(self, attr, json_dict.get(attr))
        self._fixup()

        self._api = api
        self._log = None
        self._definition = None

    def _fixup(self):
        """
        Fixup fields:
          - expand actions
          - time fields are struct_time
          - run is integer
          - configuration dict
          - log
          - definition
        """
        if self.startTime:
            self.startTime = parse_timestamp(self.startTime)
        if self.endTime:
            self.endTime = parse_timestamp(self.endTime)

        # Wrap each raw action dict in the subclass-specific action type.
        self.actions = [Action.create(self.ACTION, act_dict) for act_dict in self.actions]
        if self.conf is not None:
            xml = StringIO(i18n.smart_str(self.conf))
            self.conf_dict = hadoop.confparse.ConfParse(xml)
        else:
            self.conf_dict = {}

    def _get_log(self):
        """Get the log lazily, trigger Oozie API call at the first access."""
        if self._log is None:
            self._log = self._api.get_job_log(self.id)
        # Only the tail of very large logs is returned.
        return self._log[-Job.MAX_LOG_SIZE:]
    log = property(_get_log)

    def _get_definition(self):
        """Get the definition lazily, trigger Oozie API call at the first access."""
        if self._definition is None:
            self._definition = self._api.get_job_definition(self.id)
        return self._definition
    definition = property(_get_definition)

    def start(self):
        self._api.job_control(self.id, 'start')

    def suspend(self):
        self._api.job_control(self.id, 'suspend')

    def resume(self):
        self._api.job_control(self.id, 'resume')

    def kill(self):
        self._api.job_control(self.id, 'kill')

    def available_actions(self):
        """
        available_actions() -> Zero or more of [ 'start', 'suspend', 'resume', 'kill' ]
        """
        if self.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
            return []

        res = []
        if self.status == 'PREP':
            res.append('start')
        if self.status == 'RUNNING':
            res.append('suspend')
        if self.status == 'SUSPENDED':
            res.append('resume')
        res.append('kill')
        return res

    def check_request_permission(self, request):
        """Raise PopupException if request user doesn't have permission to modify workflow"""
        if not request.user.is_superuser and request.user.username != self.user:
            access_warn(request, _('Insufficient permission.'))
            raise PopupException(_("Permission denied. User %(username)s cannot modify user %(user)s's job.") %
                                 dict(username=request.user.username, user=self.user))

    def get_control_flow_actions(self):
        return [action for action in self.actions if ControlFlowAction.is_control_flow(action.type)]

    def get_working_actions(self):
        return [action for action in self.actions if not ControlFlowAction.is_control_flow(action.type)]

    def is_running(self):
        # Running in any of the three job flavors' running states.
        return self.status in Workflow.RUNNING_STATUSES | Coordinator.RUNNING_STATUSES | Bundle.RUNNING_STATUSES

    def __str__(self):
        return '%s - %s' % (self.id, self.status)

    @property
    def has_sla(self):
        # NOTE: reads the job definition, which triggers an API call on first access.
        return '<sla:info>' in self.definition
class Workflow(Job):
    # JSON keys copied verbatim from the Oozie REST payload onto the instance.
    _ATTRS = [
        'actions',
        'appName',
        'appPath',
        'conf',
        'consoleUrl',
        'createdTime',
        'endTime',
        'externalId',
        'group',
        'id',
        'lastModTime',
        'run',
        'startTime',
        'status',
        'user',
        'acl',
        'parentId'
    ]

    ACTION = WorkflowAction
    # Oozie workflow lifecycle states.
    RUNNING_STATUSES = set(['PREP', 'RUNNING', 'SUSPENDED'])
    FINISHED_STATUSES = set(['SUCCEEDED', 'KILLED', 'FAILED'])

    def _fixup(self):
        # Normalize timestamps and the run counter on top of Job._fixup().
        super(Workflow, self)._fixup()

        if self.createdTime:
            self.createdTime = parse_timestamp(self.createdTime)
        if self.lastModTime:
            self.lastModTime = parse_timestamp(self.lastModTime)

        if self.run:
            self.run = int(self.run)

    @property
    def type(self):
        return 'Workflow'

    def get_parent_job_id(self):
        # Coordinator parents look like "<coordinator_id>@<action>"; keep only the job id.
        if self.parentId and '@' in self.parentId:
            return self.parentId.split('@')[0]
        return self.parentId

    def get_absolute_url(self, format='html'):
        # Carry the JSON-format flag and any coordinator/bundle context as query params.
        extra_params = []
        if format == 'json':
            extra_params.append('format=json')
        if hasattr(self, 'oozie_coordinator') and self.oozie_coordinator:
            extra_params.append('coordinator_job_id=%s' % self.oozie_coordinator.id)
        if hasattr(self, 'oozie_bundle') and self.oozie_bundle:
            extra_params.append('bundle_job_id=%s' % self.oozie_bundle.id)
        if extra_params:
            extra_params = '?' + '&'.join(extra_params)
        else:
            extra_params = ''
        return reverse('oozie:list_oozie_workflow', kwargs={'job_id': self.id}) + extra_params

    def get_progress(self, full_node_list=None):
        # Percentage of finished actions; finished jobs always report 100%.
        if self.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
            return 100 # Case of decision nodes
        else:
            if full_node_list is not None: # Should remove the un-reached branches if decision node
                total_actions = len(full_node_list) - 1 # -1 because of Kill node
            else:
                total_actions = len(self.actions)
            return int(sum([action.is_finished() for action in self.actions]) / float(max(total_actions, 1)) * 100)
class Coordinator(Job):
    """An Oozie coordinator job, as returned by the Oozie REST API."""

    # JSON keys copied verbatim from the Oozie REST payload onto the instance.
    _ATTRS = [
        'acl',
        'actions',
        'conf',
        'concurrency',
        'consoleUrl',
        'coordExternalId',
        'coordJobId',
        'coordJobName',
        'coordJobPath',
        'endTime',
        'executionPolicy',
        'frequency',
        'group',
        'lastAction',
        'mat_throttling',
        'nextMaterializedTime',
        'pauseTime',
        'startTime',
        'status',
        'timeOut',
        'timeUnit',
        'timeZone',
        'user',
        'bundleId',
        'total'
    ]

    ACTION = CoordinatorAction
    # Oozie coordinator lifecycle states.
    RUNNING_STATUSES = set(['PREP', 'RUNNING', 'RUNNINGWITHERROR', 'PREPSUSPENDED', 'SUSPENDED', 'SUSPENDEDWITHERROR', 'PREPPAUSED', 'PAUSED', 'PAUSEDWITHERROR'])
    FINISHED_STATUSES = set(['SUCCEEDED', 'DONEWITHERROR', 'KILLED', 'FAILED'])

    def _fixup(self):
        """Normalize timestamps and expose generic id/appName aliases."""
        super(Coordinator, self)._fixup()

        if self.nextMaterializedTime is not None:
            self.nextMaterializedTime = parse_timestamp(self.nextMaterializedTime)
        else:
            self.nextMaterializedTime = self.startTime
        if self.pauseTime:
            self.pauseTime = parse_timestamp(self.pauseTime)

        # For when listing/mixing all the jobs together.
        self.id = self.coordJobId
        self.appName = self.coordJobName

    @property
    def type(self):
        return 'Coordinator'

    def get_absolute_url(self, oozie_bundle=None, format='html'):
        """URL of the coordinator page, carrying JSON-format and bundle context."""
        extra_params = []
        if format == 'json':
            extra_params.append('format=json')
        if oozie_bundle:
            extra_params.append('bundle_job_id=%s' % oozie_bundle.id)
        if hasattr(self, 'bundleId') and self.bundleId:
            extra_params.append('bundle_job_id=%s' % self.bundleId)
        if extra_params:
            extra_params = '?' + '&'.join(extra_params)
        else:
            extra_params = ''
        return reverse('oozie:list_oozie_coordinator', kwargs={'job_id': self.id}) + extra_params

    def get_progress(self):
        """How much more time before the final materialization."""
        # FIX: renamed the local from 'next' so the builtin next() is not shadowed.
        next_time = mktime(self.nextMaterializedTime)
        start = mktime(self.startTime)
        end = mktime(self.endTime)
        if end != start:
            progress = min(int((1 - (end - next_time) / (end - start)) * 100), 100)
        else:
            progress = 100

        # Manage case of a rerun: fall back to the finished-action ratio.
        action_count = float(len(self.actions))
        if action_count != 0 and progress == 100:
            progress = int(sum([action.is_finished() for action in self.actions]) / action_count * 100)

        return progress

    @classmethod
    def aggreate(cls, actions):
        # NOTE: historical misspelling of "aggregate"; the name is part of the
        # public interface and kept for existing callers.
        """Collapse a sorted list of numeric action-number strings into
        "first-last" range strings, e.g. ['1','2','3','5'] -> ['1-3', '5-5']."""
        if not actions:
            return []

        result = []
        first = prev = actions[0]
        for a in actions[1:]:
            if int(a) != int(prev) + 1:
                result.append('-'.join((first, prev)))
                first = a
            prev = a
        result.append('-'.join((first, prev)))

        return result

    @property
    def human_frequency(self):
        # Local import to avoid a circular dependency with the oozie app.
        from oozie.models import Coordinator
        return Coordinator.CRON_MAPPING.get(self.frequency, self.frequency)
class Bundle(Job):
    # JSON keys copied verbatim from the Oozie REST payload onto the instance.
    _ATTRS = [
        'status',
        'toString',
        'group',
        'conf',
        'bundleJobName',
        'startTime',
        'bundleCoordJobs',
        'kickoffTime',
        'acl',
        'bundleJobPath',
        'createdTime',
        'timeOut',
        'consoleUrl',
        'bundleExternalId',
        'timeUnit',
        'pauseTime',
        'bundleJobId',
        'endTime',
        'user',
    ]

    ACTION = BundleAction
    # Oozie bundle lifecycle states.
    RUNNING_STATUSES = set(['PREP', 'RUNNING', 'RUNNINGWITHERROR', 'SUSPENDED', 'PREPSUSPENDED', 'SUSPENDEDWITHERROR', 'PAUSED', 'PAUSEDWITHERROR', 'PREPPAUSED'])
    FINISHED_STATUSES = set(['SUCCEEDED', 'DONEWITHERROR', 'KILLED', 'FAILED'])

    def _fixup(self):
        # A bundle's "actions" are its coordinator jobs.
        self.actions = self.bundleCoordJobs
        super(Bundle, self)._fixup()

        # For when listing/mixing all the jobs together
        self.id = self.bundleJobId
        self.appName = self.bundleJobName

    @property
    def type(self):
        return 'Bundle'

    def get_absolute_url(self, format='html'):
        extra_params = ''
        if format == 'json':
            extra_params = '?format=json'
        return reverse('oozie:list_oozie_bundle', kwargs={'job_id': self.id}) + extra_params

    def get_progress(self):
        # Average of the child coordinators' progress; 0 when there are none.
        progresses = [action.get_progress() for action in self.actions]
        count = len(progresses)
        if count != 0:
            return sum(progresses) / float(count)
        else:
            return 0
class JobList(object):
    """One page of Oozie jobs (Workflows, Coordinators or Bundles)."""

    _ATTRS = [
        'offset',
        'len',
        'total',
        'jobs',
    ]

    def __init__(self, klass, jobs_key, api, json_dict, filters=None):
        """
        ``json_dict`` is the raw Oozie JSON response; ``jobs_key`` names the
        list entry holding the job dictionaries; ``filters`` is (optionally)
        the list of filters used to select this list.
        """
        self._api = api
        self.offset = int(json_dict['offset'])
        self.total = int(json_dict['total'])
        # Wrap every raw job dictionary in the given job class.
        self.jobs = []
        for job_dict in json_dict[jobs_key]:
            self.jobs.append(klass(self._api, job_dict))
        self.filters = filters
class WorkflowList(JobList):
    # JobList specialization for the "workflows" key of the Oozie response.
    def __init__(self, api, json_dict, filters=None):
        super(WorkflowList, self).__init__(Workflow, 'workflows', api, json_dict, filters)
class CoordinatorList(JobList):
    # JobList specialization for the "coordinatorjobs" key of the Oozie response.
    def __init__(self, api, json_dict, filters=None):
        super(CoordinatorList, self).__init__(Coordinator, 'coordinatorjobs', api, json_dict, filters)
class BundleList(JobList):
    # JobList specialization for the "bundlejobs" key of the Oozie response.
    def __init__(self, api, json_dict, filters=None):
        super(BundleList, self).__init__(Bundle, 'bundlejobs', api, json_dict, filters)
| apache-2.0 |
t794104/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_block_storage.py | 44 | 9899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_block_storage
short_description: Create/destroy or attach/detach Block Storage volumes in DigitalOcean
description:
- Create/destroy Block Storage volume in DigitalOcean, or attach/detach Block Storage volume to a droplet.
version_added: "2.2"
options:
command:
description:
- Which operation do you want to perform.
choices: ['create', 'attach']
required: true
state:
description:
- Indicate desired state of the target.
choices: ['present', 'absent']
required: true
block_size:
description:
- The size of the Block Storage volume in gigabytes. Required when command=create and state=present. If snapshot_id is included, this will be ignored.
volume_name:
description:
- The name of the Block Storage volume.
required: true
description:
description:
- Description of the Block Storage volume.
region:
description:
- The slug of the region where your Block Storage volume should be located in. If snapshot_id is included, this will be ignored.
required: true
snapshot_id:
version_added: "2.5"
description:
- The snapshot id you would like the Block Storage volume created with. If included, region and block_size will be ignored and changed to null.
droplet_id:
description:
- The droplet id you want to operate on. Required when command=attach.
extends_documentation_fragment: digital_ocean.documentation
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
They both refer to the v2 token.
- If snapshot_id is used, region and block_size will be ignored and changed to null.
author:
- "Harnek Sidhu (@harneksidhu)"
'''
EXAMPLES = '''
# Create new Block Storage
- digital_ocean_block_storage:
state: present
command: create
api_token: <TOKEN>
region: nyc1
block_size: 10
volume_name: nyc1-block-storage
# Delete Block Storage
- digital_ocean_block_storage:
state: absent
command: create
api_token: <TOKEN>
region: nyc1
volume_name: nyc1-block-storage
# Attach Block Storage to a Droplet
- digital_ocean_block_storage:
state: present
command: attach
api_token: <TOKEN>
volume_name: nyc1-block-storage
region: nyc1
droplet_id: <ID>
# Detach Block Storage from a Droplet
- digital_ocean_block_storage:
state: absent
command: attach
api_token: <TOKEN>
volume_name: nyc1-block-storage
region: nyc1
droplet_id: <ID>
'''
RETURN = '''
id:
description: Unique identifier of a Block Storage volume returned during creation.
returned: changed
type: str
sample: "69b25d9a-494c-12e6-a5af-001f53126b44"
'''
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
class DOBlockStorageException(Exception):
    """Raised when the DigitalOcean API reports an error for a volume operation."""
    pass
class DOBlockStorage(object):
    """Implements create/delete/attach/detach operations for DigitalOcean
    Block Storage volumes through the v2 REST API."""

    def __init__(self, module):
        self.module = module
        self.rest = DigitalOceanHelper(module)

    def get_key_or_fail(self, k):
        """Return module parameter ``k``; fail the module run when it is unset."""
        v = self.module.params[k]
        if v is None:
            self.module.fail_json(msg='Unable to load %s' % k)
        return v

    def poll_action_for_complete_status(self, action_id):
        """Poll the actions endpoint until the action completes.

        Returns True on completion; raises DOBlockStorageException when the
        action errors out or the configured timeout elapses.
        """
        url = 'actions/{0}'.format(action_id)
        end_time = time.time() + self.module.params['timeout']
        while time.time() < end_time:
            time.sleep(2)
            response = self.rest.get(url)
            status = response.status_code
            json = response.json
            if status == 200:
                if json['action']['status'] == 'completed':
                    return True
                elif json['action']['status'] == 'errored':
                    raise DOBlockStorageException(json['message'])
        # NOTE: also raised on a plain timeout, not only connectivity failures.
        raise DOBlockStorageException('Unable to reach api.digitalocean.com')

    def get_attached_droplet_ID(self, volume_name, region):
        """Return the id of the droplet the volume is attached to, or None."""
        # FIX: the '&' of the '&region' query separator had been corrupted
        # into a registered-trademark character, breaking the query string.
        url = 'volumes?name={0}&region={1}'.format(volume_name, region)
        response = self.rest.get(url)
        status = response.status_code
        json = response.json
        if status == 200:
            volumes = json['volumes']
            if len(volumes) > 0:
                droplet_ids = volumes[0]['droplet_ids']
                if len(droplet_ids) > 0:
                    return droplet_ids[0]
            return None
        else:
            raise DOBlockStorageException(json['message'])

    def attach_detach_block_storage(self, method, volume_name, region, droplet_id):
        """Run a volume action ('attach' or 'detach') and wait for it.

        Returns True on success, False when the API rejects the request (422).
        """
        data = {
            'type': method,
            'volume_name': volume_name,
            'region': region,
            'droplet_id': droplet_id
        }
        response = self.rest.post('volumes/actions', data=data)
        status = response.status_code
        json = response.json
        if status == 202:
            # Accepted: the action runs asynchronously, poll until done.
            return self.poll_action_for_complete_status(json['action']['id'])
        elif status == 200:
            return True
        elif status == 422:
            return False
        else:
            raise DOBlockStorageException(json['message'])

    def create_block_storage(self):
        """Create a volume (optionally from a snapshot); exits the module."""
        volume_name = self.get_key_or_fail('volume_name')
        snapshot_id = self.module.params['snapshot_id']
        if snapshot_id:
            # When restoring from a snapshot the API derives size and region.
            self.module.params['block_size'] = None
            self.module.params['region'] = None
            block_size = None
            region = None
        else:
            block_size = self.get_key_or_fail('block_size')
            region = self.get_key_or_fail('region')
        description = self.module.params['description']
        data = {
            'size_gigabytes': block_size,
            'name': volume_name,
            'description': description,
            'region': region,
            'snapshot_id': snapshot_id,
        }
        response = self.rest.post("volumes", data=data)
        status = response.status_code
        json = response.json
        if status == 201:
            self.module.exit_json(changed=True, id=json['volume']['id'])
        elif status == 409 and json['id'] == 'conflict':
            # The volume already exists: idempotent no-op.
            self.module.exit_json(changed=False)
        else:
            raise DOBlockStorageException(json['message'])

    def delete_block_storage(self):
        """Delete a volume, detaching it first if necessary; exits the module."""
        volume_name = self.get_key_or_fail('volume_name')
        region = self.get_key_or_fail('region')
        # FIX: same '&region' mojibake repair as in get_attached_droplet_ID.
        url = 'volumes?name={0}&region={1}'.format(volume_name, region)
        attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
        if attached_droplet_id is not None:
            self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id)
        response = self.rest.delete(url)
        status = response.status_code
        json = response.json
        if status == 204:
            self.module.exit_json(changed=True)
        elif status == 404:
            # Already gone: idempotent no-op.
            self.module.exit_json(changed=False)
        else:
            raise DOBlockStorageException(json['message'])

    def attach_block_storage(self):
        """Attach a volume to a droplet, moving it if attached elsewhere."""
        volume_name = self.get_key_or_fail('volume_name')
        region = self.get_key_or_fail('region')
        droplet_id = self.get_key_or_fail('droplet_id')
        attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
        if attached_droplet_id is not None:
            if attached_droplet_id == droplet_id:
                self.module.exit_json(changed=False)
            else:
                self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id)
        changed_status = self.attach_detach_block_storage('attach', volume_name, region, droplet_id)
        self.module.exit_json(changed=changed_status)

    def detach_block_storage(self):
        """Detach a volume from a droplet; exits the module."""
        volume_name = self.get_key_or_fail('volume_name')
        region = self.get_key_or_fail('region')
        droplet_id = self.get_key_or_fail('droplet_id')
        changed_status = self.attach_detach_block_storage('detach', volume_name, region, droplet_id)
        self.module.exit_json(changed=changed_status)
def handle_request(module):
    """Dispatch the requested (command, state) pair to the matching operation."""
    block_storage = DOBlockStorage(module)
    command = module.params['command']
    state = module.params['state']
    operations = {
        ('create', 'present'): block_storage.create_block_storage,
        ('create', 'absent'): block_storage.delete_block_storage,
        ('attach', 'present'): block_storage.attach_block_storage,
        ('attach', 'absent'): block_storage.detach_block_storage,
    }
    operation = operations.get((command, state))
    if operation is not None:
        operation()
def main():
    """Module entry point: build the argument spec and run the request."""
    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
    argument_spec.update(
        state=dict(choices=['present', 'absent'], required=True),
        command=dict(choices=['create', 'attach'], required=True),
        block_size=dict(type='int', required=False),
        volume_name=dict(type='str', required=True),
        description=dict(type='str'),
        region=dict(type='str', required=False),
        snapshot_id=dict(type='str', required=False),
        droplet_id=dict(type='int')
    )
    module = AnsibleModule(argument_spec=argument_spec)
    try:
        handle_request(module)
    except DOBlockStorageException as e:
        # FIX: str(e) instead of e.message — BaseException.message does not
        # exist on Python 3, so the old code crashed while reporting errors.
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    except KeyError as e:
        module.fail_json(msg='Unable to load %s' % str(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
| gpl-3.0 |
fpischedda/pywek20_tweet_feed | tweet_feed/producer.py | 1 | 1175 | import pika
import ujson
class Producer:
    """Publishes JSON-serialized messages to a durable fanout AMQP exchange."""

    EXCHANGE_NAME = 'feed_source'
    EXCHANGE_TYPE = 'fanout'

    def __init__(self, amqp_url=None):
        # Connect eagerly when a URL is given, otherwise stay disconnected
        # until connect() is called explicitly.
        self.connection = None
        if amqp_url is not None:
            self.connect(amqp_url)
        self.properties = pika.BasicProperties('application/json')

    def connect(self, amqp_url):
        """Open a blocking connection and declare the durable fanout exchange."""
        parameters = pika.URLParameters(amqp_url)
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()
        self.channel.exchange_declare(
            self.EXCHANGE_NAME,
            self.EXCHANGE_TYPE,
            passive=False,
            durable=True,
            auto_delete=False,
        )

    def send_msg(self, msg_obj):
        """Serialize ``msg_obj`` to JSON and publish it to the exchange."""
        payload = ujson.dumps(msg_obj)
        self.channel.basic_publish(
            exchange=self.EXCHANGE_NAME,
            routing_key='/',
            body=payload,
            properties=self.properties,
        )

    def close_connection(self):
        self.connection.close()
| bsd-3-clause |
ArcherSys/ArcherSys | eclipse/plugins/org.python.pydev_4.5.5.201603221110/pysrc/pydevd_attach_to_process/winappdbg/util.py | 102 | 36223 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Miscellaneous utility classes and functions.
@group Helpers:
PathOperations,
MemoryAddresses,
CustomAddressIterator,
DataAddressIterator,
ImageAddressIterator,
MappedAddressIterator,
ExecutableAddressIterator,
ReadableAddressIterator,
WriteableAddressIterator,
ExecutableAndWriteableAddressIterator,
DebugRegister,
Regenerator,
BannerHelpFormatter,
StaticClass,
classproperty
"""
__revision__ = "$Id$"
__all__ = [
# Filename and pathname manipulation
'PathOperations',
# Memory address operations
'MemoryAddresses',
'CustomAddressIterator',
'DataAddressIterator',
'ImageAddressIterator',
'MappedAddressIterator',
'ExecutableAddressIterator',
'ReadableAddressIterator',
'WriteableAddressIterator',
'ExecutableAndWriteableAddressIterator',
# Debug registers manipulation
'DebugRegister',
# Miscellaneous
'Regenerator',
]
import sys
import os
import ctypes
import optparse
from winappdbg import win32
from winappdbg import compat
#==============================================================================
class classproperty(property):
    """
    Class property method.
    Only works for getting properties, if you set them
    the symbol gets overwritten in the class namespace.
    Inspired on: U{http://stackoverflow.com/a/7864317/426293}
    """
    def __init__(self, fget=None, fset=None, fdel=None, doc=""):
        # Setters and deleters are not supported: assigning to the attribute
        # on the class simply rebinds the name in the class namespace,
        # bypassing this descriptor entirely, so there is nothing to hook.
        if fset is not None or fdel is not None:
            raise NotImplementedError()
        # Wrap the getter in a classmethod so it receives the class, not an
        # instance, when invoked through __get__ below.
        super(classproperty, self).__init__(fget=classmethod(fget), doc=doc)
    def __get__(self, cls, owner):
        # Bind the wrapped classmethod to the owner class and call it, so
        # the property can be read from the class itself as well as from
        # instances.
        return self.fget.__get__(None, owner)()
class BannerHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that prints a banner before the usage message.

    Behaves exactly like optparse's default indented formatter, except
    that L{format_usage} prepends the banner given at construction time.
    """

    def __init__(self, banner, *argv, **argd):
        optparse.IndentedHelpFormatter.__init__(self, *argv, **argd)
        self.banner = banner

    def format_usage(self, usage):
        base_usage = optparse.IndentedHelpFormatter.format_usage(self, usage)
        return '%s\n%s' % (self.banner, base_usage)
# See Process.generate_memory_snapshot()
class Regenerator(object):
    """
    Calls a generator and iterates it. When it's finished iterating, the
    generator is called again. This allows you to iterate a generator more
    than once (well, sort of).
    """

    def __init__(self, g_function, *v_args, **d_args):
        """
        @type  g_function: function
        @param g_function: Function that when called returns a generator.
        @type  v_args: tuple
        @param v_args: Variable arguments to pass to the generator function.
        @type  d_args: dict
        @param d_args: Keyword arguments to pass to the generator function.
        """
        self.__g_function = g_function
        self.__v_args = v_args
        self.__d_args = d_args
        self.__g_object = None

    def __iter__(self):
        'x.__iter__() <==> iter(x)'
        return self

    def next(self):
        'x.next() -> the next value, or raise StopIteration'
        # Lazily (re)create the underlying generator on first use and after
        # each exhaustion, so this object can be iterated repeatedly.
        if self.__g_object is None:
            self.__g_object = self.__g_function(*self.__v_args, **self.__d_args)
        try:
            # Use the next() builtin instead of calling the .next() method:
            # works on both Python 2 (>= 2.6) and Python 3 iterators.
            return next(self.__g_object)
        except StopIteration:
            # Forget the exhausted generator; the next call starts over.
            self.__g_object = None
            raise

    # Python 3 spells the iterator protocol method __next__; without this
    # alias the class is not iterable on Python 3 at all.
    __next__ = next
class StaticClass (object):
    # Base class for namespace-only classes: they group static and class
    # methods and are never meant to be instantiated.
    def __new__(cls, *argv, **argd):
        "Don't try to instance this class, just use the static methods."
        raise NotImplementedError(
            "Cannot instance static class %s" % cls.__name__)
#==============================================================================
class PathOperations (StaticClass):
    """
    Static methods for filename and pathname manipulation.

    All methods are thin wrappers around path functions exposed by the
    L{win32} wrapper module.
    """

    @staticmethod
    def path_is_relative(path):
        """
        @see: L{path_is_absolute}
        @type  path: str
        @param path: Absolute or relative path.
        @rtype:  bool
        @return: C{True} if the path is relative, C{False} if it's absolute.
        """
        return win32.PathIsRelative(path)

    @staticmethod
    def path_is_absolute(path):
        """
        @see: L{path_is_relative}
        @type  path: str
        @param path: Absolute or relative path.
        @rtype:  bool
        @return: C{True} if the path is absolute, C{False} if it's relative.
        """
        return not win32.PathIsRelative(path)

    @staticmethod
    def make_relative(path, current = None):
        """
        @type  path: str
        @param path: Absolute path.
        @type  current: str
        @param current: (Optional) Path to the current directory.
        @rtype:  str
        @return: Relative path.
        @raise WindowsError: It's impossible to make the path relative.
            This happens when the path and the current path are not on the
            same disk drive or network share.
        """
        return win32.PathRelativePathTo(pszFrom = current, pszTo = path)

    @staticmethod
    def make_absolute(path):
        """
        @type  path: str
        @param path: Relative path.
        @rtype:  str
        @return: Absolute path.
        """
        return win32.GetFullPathName(path)[0]

    @staticmethod
    def split_extension(pathname):
        """
        @type  pathname: str
        @param pathname: Absolute path.
        @rtype:  tuple( str, str )
        @return:
            Tuple containing the file and extension components of the filename.
        """
        filepart = win32.PathRemoveExtension(pathname)
        extpart = win32.PathFindExtension(pathname)
        return (filepart, extpart)

    @staticmethod
    def split_filename(pathname):
        """
        @type  pathname: str
        @param pathname: Absolute path.
        @rtype:  tuple( str, str )
        @return: Tuple containing the path to the file and the base filename.
        """
        filepart = win32.PathFindFileName(pathname)
        pathpart = win32.PathRemoveFileSpec(pathname)
        return (pathpart, filepart)

    @staticmethod
    def split_path(path):
        """
        @see: L{join_path}
        @type  path: str
        @param path: Absolute or relative path.
        @rtype:  list( str... )
        @return: List of path components.
        """
        components = list()
        # Peel one component off the front of the path per iteration:
        # PathFindNextComponent returns the remainder after the first
        # component, so the component itself is the prefix we slice off.
        # NOTE(review): the local name 'next' shadows the builtin; harmless
        # here, but worth renaming at some point.
        while path:
            next = win32.PathFindNextComponent(path)
            if next:
                prev = path[ : -len(next) ]
                components.append(prev)
            path = next
        return components

    @staticmethod
    def join_path(*components):
        """
        @see: L{split_path}
        @type  components: tuple( str... )
        @param components: Path components.
        @rtype:  str
        @return: Absolute or relative path.
        """
        if components:
            path = components[0]
            for next in components[1:]:
                path = win32.PathAppend(path, next)
        else:
            path = ""
        return path

    @staticmethod
    def native_to_win32_pathname(name):
        """
        @type  name: str
        @param name: Native (NT) absolute pathname.
        @rtype:  str
        @return: Win32 absolute pathname.
        """
        # XXX TODO
        # There are probably some native paths that
        # won't be converted by this naive approach.
        if name.startswith(compat.b("\\")):
            if name.startswith(compat.b("\\??\\")):
                # "\??\C:\..." is just "C:\..." with an NT prefix.
                name = name[4:]
            elif name.startswith(compat.b("\\SystemRoot\\")):
                # Expand the symbolic "\SystemRoot" prefix from the
                # environment (typically "C:\Windows").
                system_root_path = os.environ['SYSTEMROOT']
                if system_root_path.endswith('\\'):
                    system_root_path = system_root_path[:-1]
                name = system_root_path + name[11:]
            else:
                # Otherwise, try to match the path against the native device
                # path of every possible drive letter (A: through Z:).
                for drive_number in compat.xrange(ord('A'), ord('Z') + 1):
                    drive_letter = '%c:' % drive_number
                    try:
                        device_native_path = win32.QueryDosDevice(drive_letter)
                    except WindowsError:
                        e = sys.exc_info()[1]
                        # Nonexistent drives are expected; anything else is
                        # a real error and must propagate.
                        if e.winerror in (win32.ERROR_FILE_NOT_FOUND, \
                                          win32.ERROR_PATH_NOT_FOUND):
                            continue
                        raise
                    if not device_native_path.endswith(compat.b('\\')):
                        device_native_path += compat.b('\\')
                    if name.startswith(device_native_path):
                        # Replace the device prefix with the drive letter.
                        name = drive_letter + compat.b('\\') + \
                               name[ len(device_native_path) : ]
                        break
        return name

    @staticmethod
    def pathname_to_filename(pathname):
        """
        Equivalent to: C{PathOperations.split_filename(pathname)[0]}
        @note: This function is preserved for backwards compatibility with
            WinAppDbg 1.4 and earlier. It may be removed in future versions.
        @type  pathname: str
        @param pathname: Absolute path to a file.
        @rtype:  str
        @return: Filename component of the path.
        """
        return win32.PathFindFileName(pathname)
#==============================================================================
class MemoryAddresses (StaticClass):
    """
    Class to manipulate memory addresses.
    @type pageSize: int
    @cvar pageSize: Page size in bytes. Defaults to 0x1000 but it's
        automatically updated on runtime when importing the module.
    """

    @classproperty
    def pageSize(cls):
        """
        Try to get the pageSize value on runtime.
        """
        try:
            try:
                pageSize = win32.GetSystemInfo().dwPageSize
            except WindowsError:
                pageSize = 0x1000
        except NameError:
            # win32 may not be importable at all (e.g. non-Windows host).
            pageSize = 0x1000
        # Cache the value by shadowing the class property, so this function
        # won't be called again.
        cls.pageSize = pageSize
        return pageSize

    @classmethod
    def align_address_to_page_start(cls, address):
        """
        Align the given address to the start of the page it occupies.
        @type  address: int
        @param address: Memory address.
        @rtype:  int
        @return: Aligned memory address.
        """
        return address - ( address % cls.pageSize )

    @classmethod
    def align_address_to_page_end(cls, address):
        """
        Align the given address to the end of the page it occupies.
        That is, to point to the start of the next page.
        @type  address: int
        @param address: Memory address.
        @rtype:  int
        @return: Aligned memory address.
        """
        return address + cls.pageSize - ( address % cls.pageSize )

    @classmethod
    def align_address_range(cls, begin, end):
        """
        Align the given address range to the start and end of the page(s) it occupies.
        @type  begin: int
        @param begin: Memory address of the beginning of the buffer.
            Use C{None} for the first legal address in the address space.
        @type  end: int
        @param end: Memory address of the end of the buffer.
            Use C{None} for the last legal address in the address space.
        @rtype:  tuple( int, int )
        @return: Aligned memory addresses.
        """
        if begin is None:
            begin = 0
        if end is None:
            end = win32.LPVOID(-1).value  # XXX HACK
        if end < begin:
            begin, end = end, begin
        begin = cls.align_address_to_page_start(begin)
        # Only extend the end if it isn't already page aligned, otherwise
        # we would add a whole extra page.
        if end != cls.align_address_to_page_start(end):
            end = cls.align_address_to_page_end(end)
        return (begin, end)

    @classmethod
    def get_buffer_size_in_pages(cls, address, size):
        """
        Get the number of pages in use by the given buffer.
        @type  address: int
        @param address: Aligned memory address.
        @type  size: int
        @param size: Buffer size.
        @rtype:  int
        @return: Buffer size in number of pages.
        """
        if size < 0:
            # A negative size means the buffer extends backwards.
            size = -size
            address = address - size
        begin, end = cls.align_address_range(address, address + size)
        # Both bounds are page aligned here, so integer floor division is
        # exact. (The previous float-based calculation could lose precision
        # for 64-bit addresses above 2**53, e.g. near the top of the
        # address space.)
        return (end - begin) // cls.pageSize

    @staticmethod
    def do_ranges_intersect(begin, end, old_begin, old_end):
        """
        Determine if the two given memory address ranges intersect.
        @type  begin: int
        @param begin: Start address of the first range.
        @type  end: int
        @param end: End address of the first range.
        @type  old_begin: int
        @param old_begin: Start address of the second range.
        @type  old_end: int
        @param old_end: End address of the second range.
        @rtype:  bool
        @return: C{True} if the two ranges intersect, C{False} otherwise.
        """
        # Half-open ranges [begin, end): overlap if either range's start
        # falls inside the other.
        return (old_begin <= begin < old_end) or \
               (old_begin < end <= old_end) or \
               (begin <= old_begin < end) or \
               (begin < old_end <= end)
#==============================================================================
def CustomAddressIterator(memory_map, condition):
    """
    Generator function that iterates through a memory map, filtering memory
    region blocks by any given condition.

    For each region that satisfies the condition, every single memory
    address within the region is yielded, one per byte.

    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.

    @type  condition: function
    @param condition: Callback function that returns C{True} if the memory
        block should be returned, or C{False} if it should be filtered.

    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each memory region that matched the condition. (Note: the previous
        docstring incorrectly claimed MemoryBasicInformation objects were
        yielded.)
    """
    for mbi in memory_map:
        if condition(mbi):
            address = mbi.BaseAddress
            max_addr = address + mbi.RegionSize
            # Yield every byte address in the region. Kept as an explicit
            # while loop: regions can be huge and this works identically
            # on Python 2 and 3 without materializing a range object.
            while address < max_addr:
                yield address
                address = address + 1
def DataAddressIterator(memory_map):
    """
    Generator function that iterates through a memory map, returning only those
    memory blocks that contain data.
    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.
    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each matching region (see L{CustomAddressIterator}).
    """
    # Thin wrapper: filter regions by MemoryBasicInformation.has_content.
    return CustomAddressIterator(memory_map,
                                 win32.MemoryBasicInformation.has_content)
def ImageAddressIterator(memory_map):
    """
    Generator function that iterates through a memory map, returning only those
    memory blocks that belong to executable images.
    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.
    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each matching region (see L{CustomAddressIterator}).
    """
    # Thin wrapper: filter regions by MemoryBasicInformation.is_image.
    return CustomAddressIterator(memory_map,
                                 win32.MemoryBasicInformation.is_image)
def MappedAddressIterator(memory_map):
    """
    Generator function that iterates through a memory map, returning only those
    memory blocks that belong to memory mapped files.
    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.
    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each matching region (see L{CustomAddressIterator}).
    """
    # Thin wrapper: filter regions by MemoryBasicInformation.is_mapped.
    return CustomAddressIterator(memory_map,
                                 win32.MemoryBasicInformation.is_mapped)
def ReadableAddressIterator(memory_map):
    """
    Generator function that iterates through a memory map, returning only those
    memory blocks that are readable.
    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.
    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each matching region (see L{CustomAddressIterator}).
    """
    # Thin wrapper: filter regions by MemoryBasicInformation.is_readable.
    return CustomAddressIterator(memory_map,
                                 win32.MemoryBasicInformation.is_readable)
def WriteableAddressIterator(memory_map):
    """
    Generator function that iterates through a memory map, returning only those
    memory blocks that are writeable.
    @note: Writeable memory is always readable too.
    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.
    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each matching region (see L{CustomAddressIterator}).
    """
    # Thin wrapper: filter regions by MemoryBasicInformation.is_writeable.
    return CustomAddressIterator(memory_map,
                                 win32.MemoryBasicInformation.is_writeable)
def ExecutableAddressIterator(memory_map):
    """
    Generator function that iterates through a memory map, returning only those
    memory blocks that are executable.
    @note: Executable memory is always readable too.
    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.
    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each matching region (see L{CustomAddressIterator}).
    """
    # Thin wrapper: filter regions by MemoryBasicInformation.is_executable.
    return CustomAddressIterator(memory_map,
                                 win32.MemoryBasicInformation.is_executable)
def ExecutableAndWriteableAddressIterator(memory_map):
    """
    Generator function that iterates through a memory map, returning only those
    memory blocks that are executable and writeable.
    @note: The presence of such pages make memory corruption vulnerabilities
        much easier to exploit.
    @type  memory_map: list( L{win32.MemoryBasicInformation} )
    @param memory_map: List of memory region information objects.
        Returned by L{Process.get_memory_map}.
    @rtype:  generator of int
    @return: Generator object to iterate memory addresses, one per byte of
        each matching region (see L{CustomAddressIterator}).
    """
    # Thin wrapper: filter by is_executable_and_writeable (W+X pages).
    return CustomAddressIterator(memory_map,
                                 win32.MemoryBasicInformation.is_executable_and_writeable)
#==============================================================================
# Bitmask covering all the bits of a native-size register, e.g. 0xFFFFFFFF
# on 32-bit platforms. Computed by coercing -1 through SIZE_T; if ctypes
# rejects that conversion we fall back to hardcoded values based on the
# size of SIZE_T.
try:
    _registerMask = win32.SIZE_T(-1).value
except TypeError:
    if win32.SIZEOF(win32.SIZE_T) == 4:
        _registerMask = 0xFFFFFFFF
    elif win32.SIZEOF(win32.SIZE_T) == 8:
        _registerMask = 0xFFFFFFFFFFFFFFFF
    else:
        # Unsupported pointer size: re-raise the original TypeError.
        raise
class DebugRegister (StaticClass):
    """
    Class to manipulate debug registers.
    Used by L{HardwareBreakpoint}.
    @group Trigger flags used by HardwareBreakpoint:
        BREAK_ON_EXECUTION, BREAK_ON_WRITE, BREAK_ON_ACCESS, BREAK_ON_IO_ACCESS
    @group Size flags used by HardwareBreakpoint:
        WATCH_BYTE, WATCH_WORD, WATCH_DWORD, WATCH_QWORD
    @group Bitwise masks for Dr7:
        enableMask, disableMask, triggerMask, watchMask, clearMask,
        generalDetectMask
    @group Bitwise masks for Dr6:
        hitMask, hitMaskAll, debugAccessMask, singleStepMask, taskSwitchMask,
        clearDr6Mask, clearHitMask
    @group Debug control MSR definitions:
        DebugCtlMSR, LastBranchRecord, BranchTrapFlag, PinControl,
        LastBranchToIP, LastBranchFromIP,
        LastExceptionToIP, LastExceptionFromIP
    @type BREAK_ON_EXECUTION: int
    @cvar BREAK_ON_EXECUTION: Break on execution.
    @type BREAK_ON_WRITE: int
    @cvar BREAK_ON_WRITE: Break on write.
    @type BREAK_ON_ACCESS: int
    @cvar BREAK_ON_ACCESS: Break on read or write.
    @type BREAK_ON_IO_ACCESS: int
    @cvar BREAK_ON_IO_ACCESS: Break on I/O port access.
        Not supported by any hardware.
    @type WATCH_BYTE: int
    @cvar WATCH_BYTE: Watch a byte.
    @type WATCH_WORD: int
    @cvar WATCH_WORD: Watch a word.
    @type WATCH_DWORD: int
    @cvar WATCH_DWORD: Watch a double word.
    @type WATCH_QWORD: int
    @cvar WATCH_QWORD: Watch one quad word.
    @type enableMask: 4-tuple of integers
    @cvar enableMask:
        Enable bit on C{Dr7} for each slot.
        Works as a bitwise-OR mask.
    @type disableMask: 4-tuple of integers
    @cvar disableMask:
        Mask of the enable bit on C{Dr7} for each slot.
        Works as a bitwise-AND mask.
    @type triggerMask: 4-tuple of 2-tuples of integers
    @cvar triggerMask:
        Trigger bits on C{Dr7} for each trigger flag value.
        Each 2-tuple has the bitwise-OR mask and the bitwise-AND mask.
    @type watchMask: 4-tuple of 2-tuples of integers
    @cvar watchMask:
        Watch bits on C{Dr7} for each watch flag value.
        Each 2-tuple has the bitwise-OR mask and the bitwise-AND mask.
    @type clearMask: 4-tuple of integers
    @cvar clearMask:
        Mask of all important bits on C{Dr7} for each slot.
        Works as a bitwise-AND mask.
    @type generalDetectMask: integer
    @cvar generalDetectMask:
        General detect mode bit. It enables the processor to notify the
        debugger when the debugee is trying to access one of the debug
        registers.
    @type hitMask: 4-tuple of integers
    @cvar hitMask:
        Hit bit on C{Dr6} for each slot.
        Works as a bitwise-AND mask.
    @type hitMaskAll: integer
    @cvar hitMaskAll:
        Bitmask for all hit bits in C{Dr6}. Useful to know if at least one
        hardware breakpoint was hit, or to clear the hit bits only.
    @type clearHitMask: integer
    @cvar clearHitMask:
        Bitmask to clear all the hit bits in C{Dr6}.
    @type debugAccessMask: integer
    @cvar debugAccessMask:
        The debugee tried to access a debug register. Needs bit
        L{generalDetectMask} enabled in C{Dr7}.
    @type singleStepMask: integer
    @cvar singleStepMask:
        A single step exception was raised. Needs the trap flag enabled.
    @type taskSwitchMask: integer
    @cvar taskSwitchMask:
        A task switch has occurred. Needs the TSS T-bit set to 1.
    @type clearDr6Mask: integer
    @cvar clearDr6Mask:
        Bitmask to clear all meaningful bits in C{Dr6}.
    """
    BREAK_ON_EXECUTION  = 0
    BREAK_ON_WRITE      = 1
    BREAK_ON_ACCESS     = 3
    BREAK_ON_IO_ACCESS  = 2

    WATCH_BYTE  = 0
    WATCH_WORD  = 1
    WATCH_DWORD = 3
    WATCH_QWORD = 2

    registerMask = _registerMask

    #------------------------------------------------------------------------------

    ###########################################################################
    # http://en.wikipedia.org/wiki/Debug_register
    #
    # DR7 - Debug control
    #
    # The low-order eight bits of DR7 (0,2,4,6 and 1,3,5,7) selectively enable
    # the four address breakpoint conditions. There are two levels of enabling:
    # the local (0,2,4,6) and global (1,3,5,7) levels. The local enable bits
    # are automatically reset by the processor at every task switch to avoid
    # unwanted breakpoint conditions in the new task. The global enable bits
    # are not reset by a task switch; therefore, they can be used for
    # conditions that are global to all tasks.
    #
    # Bits 16-17 (DR0), 20-21 (DR1), 24-25 (DR2), 28-29 (DR3), define when
    # breakpoints trigger. Each breakpoint has a two-bit entry that specifies
    # whether they break on execution (00b), data write (01b), data read or
    # write (11b). 10b is defined to mean break on IO read or write but no
    # hardware supports it. Bits 18-19 (DR0), 22-23 (DR1), 26-27 (DR2), 30-31
    # (DR3), define how large area of memory is watched by breakpoints. Again
    # each breakpoint has a two-bit entry that specifies whether they watch
    # one (00b), two (01b), eight (10b) or four (11b) bytes.
    ###########################################################################

    # Dr7 |= enableMask[register]
    enableMask = (
        1 << 0,     # Dr0 (bit 0)
        1 << 2,     # Dr1 (bit 2)
        1 << 4,     # Dr2 (bit 4)
        1 << 6,     # Dr3 (bit 6)
    )

    # Dr7 &= disableMask[register]
    disableMask = tuple( [_registerMask ^ x for x in enableMask] )  # The registerMask from the class is not there in py3
    try:
        del x  # Only leaked into the class namespace by the list
               # comprehension on Python 2; doesn't exist in py3.
    except NameError:
        pass

    # orMask, andMask = triggerMask[register][trigger]
    # Dr7 = (Dr7 & andMask) | orMask    # to set
    # Dr7 = Dr7 & andMask               # to remove
    triggerMask = (
        # Dr0 (bits 16-17)
        (
            ((0 << 16), (3 << 16) ^ registerMask),  # execute
            ((1 << 16), (3 << 16) ^ registerMask),  # write
            ((2 << 16), (3 << 16) ^ registerMask),  # io read
            ((3 << 16), (3 << 16) ^ registerMask),  # access
        ),
        # Dr1 (bits 20-21)
        (
            ((0 << 20), (3 << 20) ^ registerMask),  # execute
            ((1 << 20), (3 << 20) ^ registerMask),  # write
            ((2 << 20), (3 << 20) ^ registerMask),  # io read
            ((3 << 20), (3 << 20) ^ registerMask),  # access
        ),
        # Dr2 (bits 24-25)
        (
            ((0 << 24), (3 << 24) ^ registerMask),  # execute
            ((1 << 24), (3 << 24) ^ registerMask),  # write
            ((2 << 24), (3 << 24) ^ registerMask),  # io read
            ((3 << 24), (3 << 24) ^ registerMask),  # access
        ),
        # Dr3 (bits 28-29)
        (
            ((0 << 28), (3 << 28) ^ registerMask),  # execute
            ((1 << 28), (3 << 28) ^ registerMask),  # write
            ((2 << 28), (3 << 28) ^ registerMask),  # io read
            ((3 << 28), (3 << 28) ^ registerMask),  # access
        ),
    )

    # orMask, andMask = watchMask[register][watch]
    # Dr7 = (Dr7 & andMask) | orMask    # to set
    # Dr7 = Dr7 & andMask               # to remove
    watchMask = (
        # Dr0 (bits 18-19)
        (
            ((0 << 18), (3 << 18) ^ registerMask),  # byte
            ((1 << 18), (3 << 18) ^ registerMask),  # word
            ((2 << 18), (3 << 18) ^ registerMask),  # qword
            ((3 << 18), (3 << 18) ^ registerMask),  # dword
        ),
        # Dr1 (bits 22-23)
        # BUGFIX: these shifts were 23, which straddled the LEN1 field
        # (bits 22-23) and Dr2's trigger field (bit 24). Bit 22 is the
        # correct base, consistent with clearMask below.
        (
            ((0 << 22), (3 << 22) ^ registerMask),  # byte
            ((1 << 22), (3 << 22) ^ registerMask),  # word
            ((2 << 22), (3 << 22) ^ registerMask),  # qword
            ((3 << 22), (3 << 22) ^ registerMask),  # dword
        ),
        # Dr2 (bits 26-27)
        (
            ((0 << 26), (3 << 26) ^ registerMask),  # byte
            ((1 << 26), (3 << 26) ^ registerMask),  # word
            ((2 << 26), (3 << 26) ^ registerMask),  # qword
            ((3 << 26), (3 << 26) ^ registerMask),  # dword
        ),
        # Dr3 (bits 30-31)
        # BUGFIX: the AND masks used (3 << 31), clearing the wrong bits;
        # the LEN3 field is bits 30-31, consistent with clearMask below.
        (
            ((0 << 30), (3 << 30) ^ registerMask),  # byte
            ((1 << 30), (3 << 30) ^ registerMask),  # word
            ((2 << 30), (3 << 30) ^ registerMask),  # qword
            ((3 << 30), (3 << 30) ^ registerMask),  # dword
        ),
    )

    # Dr7 = Dr7 & clearMask[register]
    clearMask = (
        registerMask ^ ( (1 << 0) + (3 << 16) + (3 << 18) ),    # Dr0
        registerMask ^ ( (1 << 2) + (3 << 20) + (3 << 22) ),    # Dr1
        registerMask ^ ( (1 << 4) + (3 << 24) + (3 << 26) ),    # Dr2
        registerMask ^ ( (1 << 6) + (3 << 28) + (3 << 30) ),    # Dr3
    )

    # Dr7 = Dr7 | generalDetectMask
    generalDetectMask = (1 << 13)

    ###########################################################################
    # http://en.wikipedia.org/wiki/Debug_register
    #
    # DR6 - Debug status
    #
    # The debug status register permits the debugger to determine which debug
    # conditions have occurred. When the processor detects an enabled debug
    # exception, it sets the low-order bits of this register (0,1,2,3) before
    # entering the debug exception handler.
    #
    # Note that the bits of DR6 are never cleared by the processor. To avoid
    # any confusion in identifying the next debug exception, the debug handler
    # should move zeros to DR6 immediately before returning.
    ###########################################################################

    # bool(Dr6 & hitMask[register])
    hitMask = (
        (1 << 0),   # Dr0
        (1 << 1),   # Dr1
        (1 << 2),   # Dr2
        (1 << 3),   # Dr3
    )

    # bool(Dr6 & anyHitMask)
    hitMaskAll = hitMask[0] | hitMask[1] | hitMask[2] | hitMask[3]

    # Dr6 = Dr6 & clearHitMask
    clearHitMask = registerMask ^ hitMaskAll

    # bool(Dr6 & debugAccessMask)
    debugAccessMask = (1 << 13)

    # bool(Dr6 & singleStepMask)
    singleStepMask = (1 << 14)

    # bool(Dr6 & taskSwitchMask)
    taskSwitchMask = (1 << 15)

    # Dr6 = Dr6 & clearDr6Mask
    clearDr6Mask = registerMask ^ (hitMaskAll | \
                            debugAccessMask | singleStepMask | taskSwitchMask)

    #------------------------------------------------------------------------------

    ###############################################################################
    #
    #    (from the AMD64 manuals)
    #
    #    The fields within the DebugCtlMSR register are:
    #
    #    Last-Branch Record (LBR) - Bit 0, read/write. Software sets this bit to 1
    #    to cause the processor to record the source and target addresses of the
    #    last control transfer taken before a debug exception occurs. The recorded
    #    control transfers include branch instructions, interrupts, and exceptions.
    #
    #    Branch Single Step (BTF) - Bit 1, read/write. Software uses this bit to
    #    change the behavior of the rFLAGS.TF bit. When this bit is cleared to 0,
    #    the rFLAGS.TF bit controls instruction single stepping, (normal behavior).
    #    When this bit is set to 1, the rFLAGS.TF bit controls single stepping on
    #    control transfers. The single-stepped control transfers include branch
    #    instructions, interrupts, and exceptions. Control-transfer single stepping
    #    requires both BTF=1 and rFLAGS.TF=1.
    #
    #    Performance-Monitoring/Breakpoint Pin-Control (PBi) - Bits 5-2, read/write.
    #    Software uses these bits to control the type of information reported by
    #    the four external performance-monitoring/breakpoint pins on the processor.
    #    When a PBi bit is cleared to 0, the corresponding external pin (BPi)
    #    reports performance-monitor information. When a PBi bit is set to 1, the
    #    corresponding external pin (BPi) reports breakpoint information.
    #
    #    All remaining bits in the DebugCtlMSR register are reserved.
    #
    #    Software can enable control-transfer single stepping by setting
    #    DebugCtlMSR.BTF to 1 and rFLAGS.TF to 1. The processor automatically
    #    disables control-transfer single stepping when a debug exception (#DB)
    #    occurs by clearing DebugCtlMSR.BTF to 0. rFLAGS.TF is also cleared when a
    #    #DB exception occurs. Before exiting the debug-exception handler, software
    #    must set both DebugCtlMSR.BTF and rFLAGS.TF to 1 to restart single
    #    stepping.
    #
    ###############################################################################

    DebugCtlMSR      = 0x1D9
    LastBranchRecord = (1 << 0)
    BranchTrapFlag   = (1 << 1)
    PinControl       = (
                        (1 << 2),   # PB1
                        (1 << 3),   # PB2
                        (1 << 4),   # PB3
                        (1 << 5),   # PB4
                       )

    ###############################################################################
    #
    #    (from the AMD64 manuals)
    #
    #    Control-transfer recording MSRs: LastBranchToIP, LastBranchFromIP,
    #    LastExceptionToIP, and LastExceptionFromIP. These registers are loaded
    #    automatically by the processor when the DebugCtlMSR.LBR bit is set to 1.
    #    These MSRs are read-only.
    #
    #    The processor automatically disables control-transfer recording when a
    #    debug exception (#DB) occurs by clearing DebugCtlMSR.LBR to 0. The
    #    contents of the control-transfer recording MSRs are not altered by the
    #    processor when the #DB occurs. Before exiting the debug-exception handler,
    #    software can set DebugCtlMSR.LBR to 1 to re-enable the recording mechanism.
    #
    ###############################################################################

    LastBranchToIP      = 0x1DC
    LastBranchFromIP    = 0x1DB
    LastExceptionToIP   = 0x1DE
    LastExceptionFromIP = 0x1DD

    #------------------------------------------------------------------------------

    @classmethod
    def clear_bp(cls, ctx, register):
        """
        Clears a hardware breakpoint.
        @see: find_slot, set_bp
        @type  ctx: dict( str S{->} int )
        @param ctx: Thread context dictionary.
        @type  register: int
        @param register: Slot (debug register) for hardware breakpoint.
        """
        # Clear the enable, trigger and watch bits for the slot, then zero
        # the address register itself.
        ctx['Dr7'] &= cls.clearMask[register]
        ctx['Dr%d' % register] = 0

    @classmethod
    def set_bp(cls, ctx, register, address, trigger, watch):
        """
        Sets a hardware breakpoint.
        @see: clear_bp, find_slot
        @type  ctx: dict( str S{->} int )
        @param ctx: Thread context dictionary.
        @type  register: int
        @param register: Slot (debug register).
        @type  address: int
        @param address: Memory address.
        @type  trigger: int
        @param trigger: Trigger flag. See L{HardwareBreakpoint.validTriggers}.
        @type  watch: int
        @param watch: Watch flag. See L{HardwareBreakpoint.validWatchSizes}.
        """
        Dr7 = ctx['Dr7']
        # Enable the slot, then clear-and-set first the trigger field and
        # then the watch (length) field for it.
        Dr7 |= cls.enableMask[register]
        orMask, andMask = cls.triggerMask[register][trigger]
        Dr7 &= andMask
        Dr7 |= orMask
        orMask, andMask = cls.watchMask[register][watch]
        Dr7 &= andMask
        Dr7 |= orMask
        ctx['Dr7'] = Dr7
        ctx['Dr%d' % register] = address

    @classmethod
    def find_slot(cls, ctx):
        """
        Finds an empty slot to set a hardware breakpoint.
        @see: clear_bp, set_bp
        @type  ctx: dict( str S{->} int )
        @param ctx: Thread context dictionary.
        @rtype:  int
        @return: Slot (debug register) for hardware breakpoint,
            or C{None} if all four slots are in use.
        """
        Dr7 = ctx['Dr7']
        slot = 0
        for m in cls.enableMask:
            if (Dr7 & m) == 0:
                return slot
            slot += 1
        return None
| mit |
JCBarahona/edX | common/djangoapps/third_party_auth/tests/test_settings.py | 63 | 2412 | """Unit tests for settings.py."""
from third_party_auth import provider, settings
from third_party_auth.tests import testutil
import unittest
# Sentinel values standing in for the pre-existing Django settings, so the
# tests can verify that apply_settings() appends to (rather than replaces)
# whatever was already configured.
_ORIGINAL_AUTHENTICATION_BACKENDS = ('first_authentication_backend',)
_ORIGINAL_INSTALLED_APPS = ('first_installed_app',)
_ORIGINAL_MIDDLEWARE_CLASSES = ('first_middleware_class',)
_ORIGINAL_TEMPLATE_CONTEXT_PROCESSORS = ('first_template_context_preprocessor',)
# Initial state handed to FakeDjangoSettings in every test's setUp.
_SETTINGS_MAP = {
    'AUTHENTICATION_BACKENDS': _ORIGINAL_AUTHENTICATION_BACKENDS,
    'INSTALLED_APPS': _ORIGINAL_INSTALLED_APPS,
    'MIDDLEWARE_CLASSES': _ORIGINAL_MIDDLEWARE_CLASSES,
    'TEMPLATE_CONTEXT_PROCESSORS': _ORIGINAL_TEMPLATE_CONTEXT_PROCESSORS,
    'FEATURES': {},
}
class SettingsUnitTest(testutil.TestCase):
    """Unit tests for settings management code."""

    # Allow access to protected methods (or module-protected methods) under test.
    # pylint: disable=protected-access
    # Suppress spurious no-member warning on fakes.
    # pylint: disable=no-member

    def setUp(self):
        # Build a fresh fake settings object per test so mutations made by
        # apply_settings() don't leak between test cases.
        super(SettingsUnitTest, self).setUp()
        self.settings = testutil.FakeDjangoSettings(_SETTINGS_MAP)

    def test_apply_settings_adds_exception_middleware(self):
        # Every middleware class required by third_party_auth must be added.
        settings.apply_settings(self.settings)
        for middleware_name in settings._MIDDLEWARE_CLASSES:
            self.assertIn(middleware_name, self.settings.MIDDLEWARE_CLASSES)

    def test_apply_settings_adds_fields_stored_in_session(self):
        settings.apply_settings(self.settings)
        self.assertEqual(settings._FIELDS_STORED_IN_SESSION, self.settings.FIELDS_STORED_IN_SESSION)

    def test_apply_settings_adds_third_party_auth_to_installed_apps(self):
        settings.apply_settings(self.settings)
        self.assertIn('third_party_auth', self.settings.INSTALLED_APPS)

    @unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
    def test_apply_settings_enables_no_providers_by_default(self):
        # Providers are only enabled via ConfigurationModels in the database
        settings.apply_settings(self.settings)
        self.assertEqual([], provider.Registry.enabled())

    def test_apply_settings_turns_off_raising_social_exceptions(self):
        # Guard against submitting a conf change that's convenient in dev but
        # bad in prod.
        settings.apply_settings(self.settings)
        self.assertFalse(self.settings.SOCIAL_AUTH_RAISE_EXCEPTIONS)
| agpl-3.0 |
JCBarahona/edX | openedx/core/djangoapps/credit/signals.py | 79 | 3915 | """
This file contains receivers of course publication signals.
"""
import logging
from django.dispatch import receiver
from django.utils import timezone
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
from openedx.core.djangoapps.credit.verification_access import update_verification_partitions
from xmodule.modulestore.django import SignalHandler
log = logging.getLogger(__name__)
def on_course_publish(course_key):
    """
    Will receive a delegated 'course_published' signal from cms/djangoapps/contentstore/signals.py
    and kick off a celery task to update the credit course requirements.

    IMPORTANT: It is assumed that the edx-proctoring subsystem has been appropriate refreshed
    with any on_publish event workflow *BEFORE* this method is called.
    """
    # Import here, because signal is registered at startup, but items in tasks
    # are not yet able to be loaded
    from openedx.core.djangoapps.credit import api, tasks
    # Only credit courses need their requirements recomputed; the task is
    # queued asynchronously so publishing isn't blocked.
    if api.is_credit_course(course_key):
        tasks.update_credit_course_requirements.delay(unicode(course_key))
        log.info(u'Added task to update credit requirements for course "%s" to the task queue', course_key)
@receiver(SignalHandler.pre_publish)
def on_pre_publish(sender, course_key, **kwargs):  # pylint: disable=unused-argument
    """
    Create user partitions for verification checkpoints.
    This is a pre-publish step since we need to write to the course descriptor.
    """
    # Imported lazily (see on_course_publish) to avoid loading credit app
    # internals at signal registration time.
    from openedx.core.djangoapps.credit import api
    if api.is_credit_course(course_key):
        # For now, we are tagging content with in-course-reverification access groups
        # only in credit courses on publish. In the long run, this is not where we want to put this.
        # This really should be a transformation on the course structure performed as a pre-processing
        # step by the LMS, and the transformation should be owned by the verify_student app.
        # Since none of that infrastructure currently exists, we're doing it this way instead.
        log.info(u"Starting to update in-course reverification access rules")
        update_verification_partitions(course_key)
        log.info(u"Finished updating in-course reverification access rules")
@receiver(GRADES_UPDATED)
def listen_for_grade_calculation(sender, username, grade_summary, course_key, deadline, **kwargs):  # pylint: disable=unused-argument
    """Receive 'MIN_GRADE_REQUIREMENT_STATUS' signal and update minimum grade
    requirement status.
    Args:
        sender: None
        username(string): user name
        grade_summary(dict): Dict containing output from the course grader
        course_key(CourseKey): The key for the course
        deadline(datetime): Course end date or None
    Kwargs:
        kwargs : None
    """
    # Deferred import: avoids a circular dependency that can cause syncdb
    # to fail.
    from openedx.core.djangoapps.credit import api

    course_id = CourseKey.from_string(unicode(course_key))
    if not api.is_credit_course(course_id):
        return

    requirements = api.get_credit_requirements(course_id, namespace='grade')
    if not requirements:
        return

    criteria = requirements[0].get('criteria')
    if not criteria:
        return

    min_grade = criteria.get('min_grade')
    if grade_summary['percent'] >= min_grade:
        api.set_credit_requirement_status(
            username, course_id, 'grade', 'grade',
            status="satisfied", reason={'final_grade': grade_summary['percent']}
        )
    elif deadline and deadline < timezone.now():
        api.set_credit_requirement_status(
            username, course_id, 'grade', 'grade', status="failed", reason={}
        )
| agpl-3.0 |
jshiv/turntable | test/lib/python2.7/site-packages/numpy/lib/tests/test_financial.py | 36 | 6390 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_almost_equal
)
class TestFinancial(TestCase):
    """Tests for the numpy financial functions (rate, irr, pv, fv, pmt,
    ppmt, ipmt, nper, npv, mirr) including the `when` keyword and
    broadcasting behaviour."""

    def test_rate(self):
        assert_almost_equal(np.rate(10, 0, -3500, 10000),
                            0.1107, 4)

    def test_irr(self):
        v = [-150000, 15000, 25000, 35000, 45000, 60000]
        assert_almost_equal(np.irr(v),
                            0.0524, 2)
        v = [-100, 0, 0, 74]
        assert_almost_equal(np.irr(v),
                            -0.0955, 2)
        v = [-100, 39, 59, 55, 20]
        assert_almost_equal(np.irr(v),
                            0.28095, 2)
        v = [-100, 100, 0, -7]
        assert_almost_equal(np.irr(v),
                            -0.0833, 2)
        v = [-100, 100, 0, 7]
        assert_almost_equal(np.irr(v),
                            0.06206, 2)
        v = [-5, 10.5, 1, -8, 1]
        assert_almost_equal(np.irr(v),
                            0.0886, 2)

    def test_pv(self):
        assert_almost_equal(np.pv(0.07, 20, 12000, 0),
                            -127128.17, 2)

    def test_fv(self):
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0),
                            86609.36, 2)

    def test_pmt(self):
        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000),
                            -304.146, 3)

    def test_ppmt(self):
        # BUG FIX: the comparison result used to be discarded (no assert),
        # so this test could never fail -- and the expected value had the
        # wrong sign (payments returned by ppmt are negative).
        assert_almost_equal(np.round(np.ppmt(0.1/12, 1, 60, 55000), 2),
                            -710.25, 2)

    def test_ipmt(self):
        # BUG FIX: same issue as test_ppmt -- the `==` result was discarded
        # and the expected value's sign was wrong.
        assert_almost_equal(np.round(np.ipmt(0.1/12, 1, 24, 2000), 2),
                            -16.67, 2)

    def test_nper(self):
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
                            21.54, 2)

    def test_nper2(self):
        # Zero interest rate is a degenerate case handled separately.
        assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
                            50.0, 1)

    def test_npv(self):
        assert_almost_equal(
            np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
            122.89, 2)

    def test_mirr(self):
        val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
        assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
        val = [-120000, 39000, 30000, 21000, 37000, 46000]
        assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6)
        val = [100, 200, -50, 300, -200]
        assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4)
        # With no negative cash flow, mirr is undefined and must be NaN.
        val = [39000, 30000, 21000, 37000, 46000]
        assert_(np.isnan(np.mirr(val, 0.10, 0.12)))

    def test_when(self):
        # Each function must accept `when` as 1/'begin' and 0/'end'
        # interchangeably, with 'end' as the default.
        #begin
        assert_almost_equal(np.rate(10, 20, -3500, 10000, 1),
                            np.rate(10, 20, -3500, 10000, 'begin'), 4)
        #end
        assert_almost_equal(np.rate(10, 20, -3500, 10000),
                            np.rate(10, 20, -3500, 10000, 'end'), 4)
        assert_almost_equal(np.rate(10, 20, -3500, 10000, 0),
                            np.rate(10, 20, -3500, 10000, 'end'), 4)
        # begin
        assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1),
                            np.pv(0.07, 20, 12000, 0, 'begin'), 2)
        # end
        assert_almost_equal(np.pv(0.07, 20, 12000, 0),
                            np.pv(0.07, 20, 12000, 0, 'end'), 2)
        assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0),
                            np.pv(0.07, 20, 12000, 0, 'end'), 2)
        # begin
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1),
                            np.fv(0.075, 20, -2000, 0, 'begin'), 4)
        # end
        assert_almost_equal(np.fv(0.075, 20, -2000, 0),
                            np.fv(0.075, 20, -2000, 0, 'end'), 4)
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0),
                            np.fv(0.075, 20, -2000, 0, 'end'), 4)
        # begin
        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 1),
                            np.pmt(0.08/12, 5*12, 15000., 0, 'begin'), 4)
        # end
        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0),
                            np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4)
        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 0),
                            np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4)
        # begin
        assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 1),
                            np.ppmt(0.1/12, 1, 60, 55000, 0, 'begin'), 4)
        # end
        assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0),
                            np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4)
        assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 0),
                            np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4)
        # begin
        assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 1),
                            np.ipmt(0.1/12, 1, 24, 2000, 0, 'begin'), 4)
        # end
        assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0),
                            np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4)
        assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 0),
                            np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4)
        # begin
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1),
                            np.nper(0.075, -2000, 0, 100000., 'begin'), 4)
        # end
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
                            np.nper(0.075, -2000, 0, 100000., 'end'), 4)
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0),
                            np.nper(0.075, -2000, 0, 100000., 'end'), 4)

    def test_broadcast(self):
        # The financial functions broadcast over array-valued arguments.
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
                            [21.5449442, 20.76156441], 4)
        assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000),
                            [-17.29165168, -16.66666667, -16.03647345,
                             -15.40102862, -14.76028842], 4)
        assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000),
                            [-74.998201, -75.62318601, -76.25337923,
                             -76.88882405, -77.52956425], 4)
        assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000, 0,
                                    [0, 0, 1, 'end', 'begin']),
                            [-74.998201, -75.62318601, -75.62318601,
                             -76.88882405, -76.88882405], 4)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run_module_suite()
| mit |
tpaviot/smesh | test/gtest-1.7.0/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.
  Args:
    n: the maximum arity of the predicate macros to be generated.
  Returns:
    The %-substituted preamble text as a string.
  """
  # A map that defines the values used in the preamble template.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
      'n' : n
      }
  # The preamble is a single template string; %(...)s specs are filled
  # in from DEFS.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity."""
  if n < 0:
    return None
  # The first few arities have proper English names; the rest are "n-ary".
  named = ('nullary', 'unary', 'binary', 'ternary')
  if n < len(named):
    return named[n]
  return '%s-ary' % n
def Title(word):
  """Capitalizes only the first character of the given word.

  Unlike str.title(), Title('4-ary') stays '4-ary', while
  '4-ary'.title() would be '4-Ary'."""
  first = word[0].upper()
  return first + word[1:]
def OneTo(n):
  """Returns the integers 1, 2, 3, ..., n.

  NOTE: this script targets Python 2, where range() returns a real list;
  on Python 3 this would be a lazy range object."""
  return range(1, n + 1)
def Iter(n, format, sep=''):
  """Given a positive integer n, a format string that contains 0 or
  more '%s' format specs, and optionally a separator string, returns
  the join of n strings, each formatted with the format string on an
  iterator ranged from 1 to n.

  Example:

  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # Every '%s' spec in the format receives the same iteration index,
  # so the substitution tuple repeats it spec_count times.
  spec_count = format.count('%s')
  pieces = []
  for index in range(1, n + 1):
    pieces.append(format % (spec_count * (index,)))
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions.

  The result is a C++ code fragment (helper function + macros) built by
  concatenating %-substituted template pieces."""
  # A map that defines the values used in the implementation template.
  DEFS = {
    'n' : str(n),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n))
  }
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
  impl += Iter(n, """,
          typename T%s""")
  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
  impl += Iter(n, """,
                                      const char* e%s""")
  impl += """,
                                      Pred pred"""
  impl += Iter(n, """,
                                      const T%s& v%s""")
  impl += """) {
  if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
  impl += '  return AssertionFailure() << pred_text << "("'
  impl += Iter(n, """
                    << e%s""", sep=' << ", "')
  impl += ' << ") evaluates to false, where"'
  impl += Iter(n, """
                    << "\\n" << e%s << " evaluates to " << v%s""")
  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
  GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
                on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
  GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
  impl += Iter(n, """, \\
#v%s""")
  impl += """, \\
pred"""
  impl += Iter(n, """, \\
v%s""")
  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
  return impl
def HeaderPostamble():
  """Returns the postamble for the header file (closes the include guard)."""
  return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions."""
  # Header = preamble + one implementation section per arity (1..n)
  # + postamble.
  GenerateFile(HEADER,
               HeaderPreamble(n)
               + ''.join([ImplementationForArity(i) for i in OneTo(n)])
               + HeaderPostamble())
def UnitTestPreamble():
  """Returns the preamble for the unit test file.

  The preamble contains the license banner, the needed #includes, and a
  small user-defined Bool type used to exercise predicates on
  non-built-in argument types."""
  # A map that defines the values used in the preamble template.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
      }
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
  explicit Bool(int val) : value(val != 0) {}
  bool operator>(int n) const { return value > Bool(n).value; }
  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
  bool operator==(const Bool& rhs) const { return value == rhs.value; }
  bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
  return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions."""
  # A map that defines the values used in the template for the tests.
  DEFS = {
    'n' : n,
    'es' : Iter(n, 'e%s', sep=', '),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'tvs' : Iter(n, 'T%s v%s', sep=', '),
    'int_vs' : Iter(n, 'int v%s', sep=', '),
    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
    'types' : Iter(n, 'typename T%s', sep=', '),
    'v_sum' : Iter(n, 'v%s', sep=' + '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n)),
    }
  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
  return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
  return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
  return %(v_sum)s > 0;
}
""" % DEFS)
  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
  template <%(types)s>
  bool operator()(""" % DEFS
  tests += Iter(n, 'const T%s& v%s', sep=""",
                  """)
  tests += """) {
    return %(v_sum)s > 0;
  }
};
""" % DEFS
  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                  """)
  tests += Iter(n, """,
                  const T%s& v%s""")
  tests += """) {
  if (PredFunction%(n)s(%(vs)s))
    return testing::AssertionSuccess();
  return testing::AssertionFailure()
      << """ % DEFS
  tests += Iter(n, 'e%s', sep=' << " + " << ')
  tests += """
      << " is expected to be positive, but evaluates to "
      << %(v_sum)s << ".";
}
""" % DEFS
  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
  template <%(types)s>
  testing::AssertionResult operator()(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                                      """)
  tests += Iter(n, """,
                                      const T%s& v%s""")
  tests += """) const {
    return PredFormatFunction%(n)s(%(es)s, %(vs)s);
  }
};
""" % DEFS
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
 protected:
  virtual void SetUp() {
    expected_to_finish_ = true;
    finished_ = false;""" % DEFS
  tests += """
    """ + Iter(n, 'n%s_ = ') + """0;
  }
"""
  tests += """
  virtual void TearDown() {
    // Verifies that each of the predicate's arguments was evaluated
    // exactly once."""
  tests += ''.join(["""
    EXPECT_EQ(1, n%s_) <<
        "The predicate assertion didn't evaluate argument %s "
        "exactly once.";""" % (i, i + 1) for i in OneTo(n)])
  tests += """
    // Verifies that the control flow in the test function is expected.
    if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpactedly aborted the test.";
    } else if (!expected_to_finish_ && finished_) {
      FAIL() << "The failed predicate assertion didn't abort the test "
                "as expected.";
    }
  }
  // true iff the test function is expected to run to finish.
  static bool expected_to_finish_;
  // true iff the test function did run to finish.
  static bool finished_;
""" % DEFS
  tests += Iter(n, """
  static int n%s_;""")
  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
  # Nested helper: closes over DEFS so every generated test shares the
  # same arity-specific substitution values.
  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.
    Args:
      use_format: true iff the assertion is a *_PRED_FORMAT*.
      use_assert: true iff the assertion is a ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor: true iff the first argument of the assertion is
          a functor (as opposed to a function)
      use_user_type: true iff the predicate functor/function takes
          argument(s) of a user-defined type.
    Example:
      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types."""
    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'
    assertion = assrt + '_PRED'
    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'
    assertion += '%(n)s' % DEFS
    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'
    test_name = pred_format_type.title()
    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'
    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'
    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
      'assert' : assrt,
      'assertion' : assertion,
      'test_name' : test_name,
      'pf_type' : pred_format_type,
      'pf' : pred_format,
      'arg_type' : arg_type,
      'arg' : arg,
      'successful' : successful_or_failed,
      'expected' : expected_or_not,
      })
    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
    indent = (len(assertion) + 3)*' '
    extra_indent = ''
    if expect_failure:
      extra_indent = '  '
      if use_assert:
        test += """
  expected_to_finish_ = false;
  EXPECT_FATAL_FAILURE({  // NOLINT"""
      else:
        test += """
  EXPECT_NONFATAL_FAILURE({  // NOLINT"""
    test += '\n' + extra_indent + """  %(assertion)s(%(pf)s""" % defs
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + '  finished_ = true;\n'
    if expect_failure:
      test += '  }, "");\n'
    test += '}\n'
    return test
  # Generates tests for all 2**5 = 32 combinations of the five flags.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])
  return tests
def UnitTestPostamble():
  """Returns the postamble for the tests (currently empty)."""
  return ''
def GenerateUnitTest(n):
  """Writes the tests for up-to n-ary predicate assertions to UNIT_TEST.

  (Despite the name, this does not return the tests; it generates the
  unit-test file on disk.)"""
  GenerateFile(UNIT_TEST,
               UnitTestPreamble()
               + ''.join([TestsForArity(i) for i in OneTo(n)])
               + UnitTestPostamble())
def _Main():
  """The entry point of the script. Generates the header file and its
  unit test."""
  # Exactly one command-line argument (MAX_ARITY) is required; otherwise
  # print the usage text from the module docstring and exit non-zero.
  if len(sys.argv) != 2:
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)
  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
# Run only when invoked as a script, not on import.
if __name__ == '__main__':
  _Main()
| lgpl-2.1 |
knehez/edx-platform | lms/djangoapps/courseware/tests/__init__.py | 101 | 4972 | """
integration tests for xmodule
Contains:
1. BaseTestXmodule class provides course and users
for testing Xmodules with mongo store.
"""
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.test.client import Client
from edxmako.shortcuts import render_to_string
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_MONGO_MODULESTORE
from xblock.field_data import DictFieldData
from xmodule.tests import get_test_system, get_test_descriptor_system
from opaque_keys.edx.locations import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from lms.djangoapps.lms_xblock.runtime import quote_slashes
class BaseTestXmodule(ModuleStoreTestCase):
    """Base class for testing Xmodules with mongo store.
    This class prepares course and users for tests:
        1. create test course;
        2. create, enroll and login users for this course;
    Any xmodule should overwrite only next parameters for test:
        1. CATEGORY
        2. DATA or METADATA
        3. MODEL_DATA
        4. COURSE_DATA and USER_COUNT if needed
    This class should not contain any tests, because CATEGORY
    should be defined in child class.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE
    # Number of users to create and enroll in the test course.
    USER_COUNT = 2
    COURSE_DATA = {}
    # Data from YAML common/lib/xmodule/xmodule/templates/NAME/default.yaml
    CATEGORY = "vertical"
    DATA = ''
    # METADATA must be overwritten for every instance that uses it. Otherwise,
    # if we'll change it in the tests, it will be changed for all other instances
    # of parent class.
    METADATA = {}
    MODEL_DATA = {'data': '<some_module></some_module>'}
    def new_module_runtime(self):
        """
        Generate a new ModuleSystem that is minimally set up for testing
        """
        return get_test_system(course_id=self.course.id)
    def new_descriptor_runtime(self):
        """
        Generate a descriptor runtime whose block lookups are served by the
        modulestore.
        """
        runtime = get_test_descriptor_system()
        runtime.get_block = modulestore().get_item
        return runtime
    def initialize_module(self, **kwargs):
        """
        Create the item under test (of type CATEGORY) under the course
        section and wire up its descriptor and module runtimes.
        """
        kwargs.update({
            'parent_location': self.section.location,
            'category': self.CATEGORY
        })
        self.item_descriptor = ItemFactory.create(**kwargs)
        self.runtime = self.new_descriptor_runtime()
        field_data = {}
        field_data.update(self.MODEL_DATA)
        student_data = DictFieldData(field_data)
        self.item_descriptor._field_data = LmsFieldData(self.item_descriptor._field_data, student_data)
        self.item_descriptor.xmodule_runtime = self.new_module_runtime()
        # NOTE: xmodule_runtime.xmodule_instance is still None at this time,
        # so it cannot be captured as self.item_module here.
        self.item_url = self.item_descriptor.location.to_deprecated_string()
    def setup_course(self):
        """
        Create the test course with a chapter/section skeleton, then create,
        enroll and log in USER_COUNT users.
        """
        self.course = CourseFactory.create(data=self.COURSE_DATA)
        # Turn off cache.
        modulestore().request_cache = None
        modulestore().metadata_inheritance_cache_subsystem = None
        chapter = ItemFactory.create(
            parent_location=self.course.location,
            category="sequential",
        )
        self.section = ItemFactory.create(
            parent_location=chapter.location,
            category="sequential"
        )
        # username = robot{0}, password = 'test'
        self.users = [
            UserFactory.create()
            for dummy0 in range(self.USER_COUNT)
        ]
        for user in self.users:
            CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
        # Log in all users so they have access to the Xmodule.
        self.clients = {user.username: Client() for user in self.users}
        self.login_statuses = [
            self.clients[user.username].login(
                username=user.username, password='test')
            for user in self.users
        ]
        self.assertTrue(all(self.login_statuses))
    def setUp(self):
        """Set up the course, users, and the item under test."""
        super(BaseTestXmodule, self).setUp()
        self.setup_course()
        self.initialize_module(metadata=self.METADATA, data=self.DATA)
    def get_url(self, dispatch):
        """Return item url with dispatch."""
        return reverse(
            'xblock_handler',
            args=(self.course.id.to_deprecated_string(), quote_slashes(self.item_url), 'xmodule_handler', dispatch)
        )
class XModuleRenderingTestBase(BaseTestXmodule):
    """Variant of BaseTestXmodule whose runtime renders real templates."""
    def new_module_runtime(self):
        """
        Create a runtime that actually does html rendering
        """
        runtime = super(XModuleRenderingTestBase, self).new_module_runtime()
        runtime.render_template = render_to_string
        return runtime
| agpl-3.0 |
prefetchnta/questlab | bin/python/Lib/html/parser.py | 5 | 21381 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import warnings
import _markupbase
from html import unescape
__all__ = ['HTMLParser']
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
# correctly strict without breaking backward compatibility;
# 2) if you change tagfind/attrfind remember to update locatestarttagend too;
# 3) if you change tagfind/attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors."""

    def __init__(self, msg, position=(None, None)):
        # An empty message would make the rendered error useless.
        assert msg
        self.msg = msg
        self.lineno, self.offset = position[0], position[1]

    def __str__(self):
        # Build "msg[, at line N][, column M]" with a 1-based column.
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
_default_sentinel = object()
class HTMLParser(_markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). If convert_charrefs is
True the character references are converted automatically to the
corresponding Unicode character (and self.handle_data() is no
longer split in chunks), otherwise they are passed by calling
self.handle_entityref() or self.handle_charref() with the string
containing respectively the named or numeric reference as the
argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
    def __init__(self, strict=_default_sentinel, *,
                 convert_charrefs=_default_sentinel):
        """Initialize and reset this instance.

        If convert_charrefs is True (default: False), all character references
        are automatically converted to the corresponding Unicode characters.
        If strict is set to False (the default) the parser will parse invalid
        markup, otherwise it will raise an error.  Note that the strict mode
        and argument are deprecated.

        The module-level ``_default_sentinel`` distinguishes "argument not
        passed" from an explicit False, so the deprecation warnings fire only
        when the caller relies on the defaults.
        """
        if strict is not _default_sentinel:
            warnings.warn("The strict argument and mode are deprecated.",
                          DeprecationWarning, stacklevel=2)
        else:
            strict = False  # default
        self.strict = strict
        if convert_charrefs is _default_sentinel:
            convert_charrefs = False  # default
            warnings.warn("The value of convert_charrefs will become True in "
                          "3.5. You are encouraged to set the value explicitly.",
                          DeprecationWarning, stacklevel=2)
        self.convert_charrefs = convert_charrefs
        # reset() initializes rawdata/lasttag/interesting/cdata_elem.
        self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
_markupbase.ParserBase.reset(self)
    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').  Unconsumed input is buffered in
        ``self.rawdata`` between calls.
        """
        self.rawdata = self.rawdata + data
        # 0 => do not treat the current buffer end as EOF.
        self.goahead(0)
    def close(self):
        """Handle any buffered data as if followed by EOF."""
        self.goahead(1)
    def error(self, message):
        """Deprecated: raise HTMLParseError at the current position."""
        warnings.warn("The 'error' method is deprecated.",
                      DeprecationWarning, stacklevel=2)
        raise HTMLParseError(message, self.getpos())
    # Full text of the most recent start tag (set by parse_starttag).
    __starttag_text = None
    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text
    def set_cdata_mode(self, elem):
        """Enter CDATA mode: only the matching end tag is interesting."""
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
    def clear_cdata_mode(self):
        """Leave CDATA mode and resume normal '<' / '&' scanning."""
        self.interesting = interesting_normal
        self.cdata_elem = None
    # Internal -- handle data as far as reasonable. May leave state
    # and data to be processed by a subsequent call. If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        """Main scanning loop: dispatch on '<', '&#', '&' constructs."""
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if self.convert_charrefs and not self.cdata_elem:
                j = rawdata.find('<', i)
                if j < 0:
                    # if we can't find the next <, either we are at the end
                    # or there's more text incoming. If the latter is True,
                    # we can't pass the text to handle_data in case we have
                    # a charref cut in half at end. Try to determine if
                    # this is the case before proceeding by looking for an
                    # & near the end and see if it's followed by a space or ;.
                    amppos = rawdata.rfind('&', max(i, n-34))
                    if (amppos >= 0 and
                        not re.compile(r'[\s;]').search(rawdata, amppos)):
                        break  # wait till we get all the text
                    j = n
            else:
                match = self.interesting.search(rawdata, i)  # < or &
                if match:
                    j = match.start()
                else:
                    if self.cdata_elem:
                        break
                    j = n
            # Everything up to the next interesting character is plain data.
            if i < j:
                if self.convert_charrefs and not self.cdata_elem:
                    self.handle_data(unescape(rawdata[i:j]))
                else:
                    self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i):  # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    if self.strict:
                        k = self.parse_declaration(i)
                    else:
                        k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # The construct is incomplete in the buffer; wait for
                    # more data unless we are at EOF.
                    if not end:
                        break
                    if self.strict:
                        self.error("EOF in middle of construct")
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    if self.convert_charrefs and not self.cdata_elem:
                        self.handle_data(unescape(rawdata[i:k]))
                    else:
                        self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming &#
                        self.handle_data(rawdata[i:i+2])
                        i = self.updatepos(i, i+2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        if self.strict:
                            self.error("EOF in middle of entity or char ref")
                        else:
                            k = match.end()
                            if k <= i:
                                k = n
                            i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            if self.convert_charrefs and not self.cdata_elem:
                self.handle_data(unescape(rawdata[i:n]))
            else:
                self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<!', ('unexpected call to '
'parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
'parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at *i*: extract the tag name and attributes,
        dispatch to handle_starttag/handle_startendtag, and enter CDATA
        mode for script/style elements."""
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            # Tag is not fully in the buffer yet.
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        if self.strict:
            match = tagfind.match(rawdata, i+1)
        else:
            match = tagfind_tolerant.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        # Collect (name, value) attribute pairs until the end of the tag.
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                # Bare attribute (no '=value').
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip the surrounding quotes.
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()
        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Junk between the last attribute and '>': report position.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos
    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        """Return the index of the end of the start tag at *i*, or -1 when
        the buffer may still be missing part of the tag."""
        rawdata = self.rawdata
        if self.strict:
            m = locatestarttagend.match(rawdata, i)
        else:
            m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if self.strict:
                    self.updatepos(i, j + 1)
                    self.error("malformed empty start tag")
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if self.strict:
                self.updatepos(i, j)
                self.error("malformed start tag")
            if j > i:
                return j
            else:
                return i + 1
        # locatestarttagend matches a (possibly empty) prefix, so a miss
        # here indicates an internal inconsistency.
        raise AssertionError("we should not get here!")
    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        """Parse an end tag at *i* and dispatch handle_endtag; tolerant of
        malformed input in non-strict mode."""
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1)  # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i)  # </ + tag + >
        if not match:
            # Not a well-formed end tag; in CDATA mode treat it as data.
            if self.cdata_elem is not None:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            if self.strict:
                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group(1).lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1
        elem = match.group(1).lower()  # script or style
        if self.cdata_elem is not None:
            # Only the matching end tag terminates CDATA mode.
            if elem != self.cdata_elem:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos
    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        """Handle an XHTML-style empty tag; default delegates to start+end."""
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)
    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        """Called for each start tag; *attrs* is a list of (name, value)."""
        pass
    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        """Called for each end tag."""
        pass
    # Overridable -- handle character reference
    def handle_charref(self, name):
        """Called for numeric character references like '&#nnn;'."""
        pass
    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        """Called for named entity references like '&name;'."""
        pass
    # Overridable -- handle data
    def handle_data(self, data):
        """Called for text between tags (possibly in arbitrary chunks)."""
        pass
    # Overridable -- handle comment
    def handle_comment(self, data):
        """Called for comments ('<!--...-->' and bogus comments)."""
        pass
    # Overridable -- handle declaration
    def handle_decl(self, decl):
        """Called for doctype declarations ('<!DOCTYPE ...>')."""
        pass
    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        """Called for processing instructions ('<?...>')."""
        pass
    def unknown_decl(self, data):
        """Called for unrecognized declarations; errors only in strict mode."""
        if self.strict:
            self.error("unknown declaration: %r" % (data,))
    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        """Deprecated alias for html.unescape(); kept for compatibility."""
        warnings.warn('The unescape method is deprecated and will be removed '
                      'in 3.5, use html.unescape() instead.',
                      DeprecationWarning, stacklevel=2)
        return unescape(s)
| lgpl-2.1 |
JioEducation/edx-platform | cms/djangoapps/contentstore/management/commands/clean_cert_name.py | 37 | 7765 | """
A single-use management command that provides an interactive way to remove
erroneous certificate names.
"""
from collections import namedtuple
from django.core.management.base import BaseCommand
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
Result = namedtuple("Result", ["course_key", "cert_name_short", "cert_name_long", "should_clean"])
class Command(BaseCommand):
"""
A management command that provides an interactive way to remove erroneous cert_name_long and
cert_name_short course attributes across both the Split and Mongo modulestores.
"""
help = 'Allows manual clean-up of invalid cert_name_short and cert_name_long entries on CourseModules'
    def _mongo_results(self):
        """
        Return Result objects for any mongo-modulestore backend course that has
        cert_name_short or cert_name_long set.

        Queries the raw collection directly and marks every hit with
        should_clean=True.
        """
        # N.B. This code breaks many abstraction barriers. That's ok, because
        # it's a one-time cleanup command.
        # pylint: disable=protected-access
        mongo_modulestore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
        # Match course documents where either certificate-name field exists,
        # projecting only what is needed to build a Result.
        old_mongo_courses = mongo_modulestore.collection.find({
            "_id.category": "course",
            "$or": [
                {"metadata.cert_name_short": {"$exists": 1}},
                {"metadata.cert_name_long": {"$exists": 1}},
            ]
        }, {
            "_id": True,
            "metadata.cert_name_short": True,
            "metadata.cert_name_long": True,
        })
        return [
            Result(
                mongo_modulestore.make_course_key(
                    course['_id']['org'],
                    course['_id']['course'],
                    course['_id']['name'],
                ),
                course['metadata'].get('cert_name_short'),
                course['metadata'].get('cert_name_long'),
                True
            ) for course in old_mongo_courses
        ]
    def _split_results(self):
        """
        Return Result objects for any split-modulestore backend course that has
        cert_name_short or cert_name_long set.

        Works backwards from structure documents containing the fields to the
        course indexes that reference them.
        """
        # N.B. This code breaks many abstraction barriers. That's ok, because
        # it's a one-time cleanup command.
        # pylint: disable=protected-access
        split_modulestore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split)
        active_version_collection = split_modulestore.db_connection.course_index
        structure_collection = split_modulestore.db_connection.structures
        # Collect the union of all draft and published branch ids.
        branches = active_version_collection.aggregate([{
            '$group': {
                '_id': 1,
                'draft': {'$push': '$versions.draft-branch'},
                'published': {'$push': '$versions.published-branch'}
            }
        }, {
            '$project': {
                '_id': 1,
                'branches': {'$setUnion': ['$draft', '$published']}
            }
        }])['result'][0]['branches']
        # Find structures whose course block carries either certificate field.
        structures = list(
            structure_collection.find({
                '_id': {'$in': branches},
                'blocks': {'$elemMatch': {
                    '$and': [
                        {"block_type": "course"},
                        {'$or': [
                            {'fields.cert_name_long': {'$exists': True}},
                            {'fields.cert_name_short': {'$exists': True}}
                        ]}
                    ]
                }}
            }, {
                '_id': True,
                'blocks.fields.cert_name_long': True,
                'blocks.fields.cert_name_short': True,
            })
        )
        structure_map = {struct['_id']: struct for struct in structures}
        structure_ids = [struct['_id'] for struct in structures]
        # Map matching structures back to the course indexes that use them.
        split_mongo_courses = list(active_version_collection.find({
            '$or': [
                {"versions.draft-branch": {'$in': structure_ids}},
                {"versions.published": {'$in': structure_ids}},
            ]
        }, {
            'org': True,
            'course': True,
            'run': True,
            'versions': True,
        }))
        # Copy the certificate fields (draft first, published overriding)
        # onto the course document for reporting.
        for course in split_mongo_courses:
            draft = course['versions'].get('draft-branch')
            if draft in structure_map:
                draft_fields = structure_map[draft]['blocks'][0].get('fields', {})
            else:
                draft_fields = {}
            published = course['versions'].get('published')
            if published in structure_map:
                published_fields = structure_map[published]['blocks'][0].get('fields', {})
            else:
                published_fields = {}
            for fields in (draft_fields, published_fields):
                for field in ('cert_name_short', 'cert_name_long'):
                    if field in fields:
                        course[field] = fields[field]
        return [
            Result(
                split_modulestore.make_course_key(
                    course['org'],
                    course['course'],
                    course['run'],
                ),
                course.get('cert_name_short'),
                course.get('cert_name_long'),
                True
            ) for course in split_mongo_courses
        ]
def _display(self, results):
"""
Render a list of Result objects as a nicely formatted table.
"""
headers = ["Course Key", "cert_name_short", "cert_name_short", "Should clean?"]
col_widths = [
max(len(unicode(result[col])) for result in results + [headers])
for col in range(len(results[0]))
]
id_format = "{{:>{}}} |".format(len(unicode(len(results))))
col_format = "| {{:>{}}} |"
self.stdout.write(id_format.format(""), ending='')
for header, width in zip(headers, col_widths):
self.stdout.write(col_format.format(width).format(header), ending='')
self.stdout.write('')
for idx, result in enumerate(results):
self.stdout.write(id_format.format(idx), ending='')
for col, width in zip(result, col_widths):
self.stdout.write(col_format.format(width).format(unicode(col)), ending='')
self.stdout.write("")
def _commit(self, results):
"""
For each Result in ``results``, if ``should_clean`` is True, remove cert_name_long
and cert_name_short from the course and save in the backing modulestore.
"""
for result in results:
if not result.should_clean:
continue
course = modulestore().get_course(result.course_key)
del course.cert_name_short
del course.cert_name_long
modulestore().update_item(course, ModuleStoreEnum.UserID.mgmt_command)
    def handle(self, *args, **options):
        """Interactive entry point: show the table, let the operator toggle
        rows, then commit or quit."""
        results = self._mongo_results() + self._split_results()
        self.stdout.write("Type the index of a row to toggle whether it will be cleaned, "
                          "'commit' to remove all cert_name_short and cert_name_long values "
                          "from any rows marked for cleaning, or 'quit' to quit.")
        while True:
            self._display(results)
            # NOTE: raw_input / int() — non-numeric input other than the
            # keywords raises ValueError and aborts the command (Python 2).
            command = raw_input("<index>|commit|quit: ").strip()
            if command == 'quit':
                return
            elif command == 'commit':
                self._commit(results)
                return
            elif command == '':
                continue
            else:
                # Toggle the should_clean flag on the selected row.
                index = int(command)
                results[index] = results[index]._replace(should_clean=not results[index].should_clean)
| agpl-3.0 |
gbiggs/rtshell | rtshell/rtdel.py | 2 | 4009 | #!/usr/bin/env python2
# -*- Python -*-
# -*- coding: utf-8 -*-
'''rtshell
Copyright (C) 2009-2015
Geoffrey Biggs
RT-Synthesis Research Group
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the GNU Lesser General Public License version 3.
http://www.gnu.org/licenses/lgpl-3.0.en.html
Implementation of deleting an object from a name server.
'''
from __future__ import print_function
import optparse
import os
import os.path
import rtctree.tree
import rtctree.path
import sys
import traceback
from rtshell import path
from rtshell import rts_exceptions
import rtshell
def delete_object_reference(cmd_path, full_path, options, tree=None):
    """Unbind the name at *full_path* from its parent naming context.

    cmd_path is the path as the user typed it (used in error messages),
    full_path the absolute path.  Raises rts_exceptions errors for ports,
    name servers, zombie mismatches and non-directory parents.
    """
    # NOTE(review): this local 'path' shadows the imported rtshell 'path'
    # module; harmless here but easy to misread.
    path, port = rtctree.path.parse_path(full_path)
    if port:
        raise rts_exceptions.UndeletableObjectError(cmd_path)
    if not path[-1]:
        # Drop a trailing empty component left by a trailing '/'.
        path = path[:-1]
    # Cannot delete name servers
    if len(path) == 2:
        raise rts_exceptions.UndeletableObjectError(cmd_path)
    if not tree:
        tree = rtctree.tree.RTCTree(paths=path, filter=[path])
    if options.zombies and not tree.is_zombie(path):
        raise rts_exceptions.NotZombieObjectError(cmd_path)
    # There is no point in doing path checks for the path, as the path we are
    # deleting may not be in the tree if it's a zombie. Instead, we need to
    # find its parent, and use that to remove the name.
    parent = tree.get_node(path[:-1])
    if parent.is_manager:
        raise rts_exceptions.ParentNotADirectoryError(cmd_path)
    if not parent.is_directory:
        raise rts_exceptions.ParentNotADirectoryError(cmd_path)
    parent.unbind(path[-1])
def delete_all_zombies(options, tree=None):
    """Walk the whole RTC tree and unbind every zombie registration.

    Errors on individual zombies are reported but do not stop the sweep.
    """
    if not tree:
        tree = rtctree.tree.RTCTree()
    if not tree:
        return 1
    def del_zombie(node, args):
        # Per-node callback: remove this zombie's name from its parent.
        try:
            node.parent.unbind(node.name)
        except Exception as e:
            if options.verbose:
                traceback.print_exc()
            print('{0}: {1}'.format(sys.argv[0], e), file=sys.stderr)
    tree.iterate(del_zombie, filter=['is_zombie'])
def main(argv=None, tree=None):
    """Command-line entry point for rtdel.

    Parses options, then either deletes the named object or (with -z and no
    path) sweeps all zombies.  Returns 0 on success, 1 on any error.
    """
    usage = '''Usage: %prog [options] <path>
Delete an object from a name server.'''
    version = rtshell.RTSH_VERSION
    parser = optparse.OptionParser(usage=usage, version=version)
    parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
            default=False,
            help='Output verbose information. [Default: %default]')
    parser.add_option('-z', '--zombies', dest='zombies', action='store_true',
            default=False, help='Delete only zombies. [Default: %default]')
    if argv:
        # Allow programmatic invocation with an explicit argument list.
        sys.argv = [sys.argv[0]] + argv
    try:
        options, args = parser.parse_args()
    except optparse.OptionError as e:
        print('OptionError:', e, file=sys.stderr)
        return 1
    try:
        if not args:
            if not options.zombies:
                print('{0}: No path given.'.format(sys.argv[0]),
                        file=sys.stderr)
                return 1
            else:
                # If no path given, delete all zombies found
                delete_all_zombies(options, tree)
        elif len(args) == 1:
            full_path = path.cmd_path_to_full_path(args[0])
            # Some sanity checks
            if full_path == '/':
                print('{0}: Cannot delete the root directory.'.format(sys.argv[0]),
                        file=sys.stderr)
                return 1
            delete_object_reference(args[0], full_path, options, tree)
        else:
            print(usage, file=sys.stderr)
            return 1
    except Exception as e:
        if options.verbose:
            traceback.print_exc()
        print('{0}: {1}'.format(os.path.basename(sys.argv[0]), e),
                file=sys.stderr)
        return 1
    return 0
if __name__ == '__main__':
import sys
sys.exit(main())
# vim: tw=79
| lgpl-3.0 |
florian-f/sklearn | sklearn/manifold/locally_linear.py | 3 | 24871 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD, (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, check_arrays
from ..utils.arpack import eigsh
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Y along the first axis

    We estimate the weights to assign to each point in Y[i] to recover
    the point X[i]. The barycenter weights sum to 1.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_dim)

    Z : array-like, shape (n_samples, n_neighbors, n_dim)

    reg : float, optional
        amount of regularization to add for the problem to be
        well-posed in the case of n_neighbors > n_dim

    Returns
    -------
    B : array-like, shape (n_samples, n_neighbors)
        Row i contains the weights reconstructing X[i] from Z[i];
        each row sums to 1.

    Notes
    -----
    See developers note for more information.
    """
    X = np.asarray(X)
    Z = np.asarray(Z)

    n_samples, n_neighbors = X.shape[0], Z.shape[1]
    # Promote integer inputs to floating point so the solve is well-defined.
    # (np.float64 replaces the np.float alias removed in NumPy 1.24.)
    if X.dtype.kind == 'i':
        X = X.astype(np.float64)
    if Z.dtype.kind == 'i':
        Z = Z.astype(np.float64)
    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
    v = np.ones(n_neighbors, dtype=X.dtype)

    # this might raise a LinalgError if G is singular and has trace
    # zero
    for i, A in enumerate(Z.transpose(0, 2, 1)):
        # Local Gram matrix of the neighbors centered on X[i].
        C = A.T - X[i]  # broadcasting
        G = np.dot(C, C.T)
        trace = np.trace(G)
        if trace > 0:
            R = reg * trace
        else:
            R = reg
        # Tikhonov regularization on the diagonal.
        G.flat[::Z.shape[1] + 1] += R
        # assume_a='pos' is the modern spelling of the removed sym_pos=True.
        w = solve(G, v, assume_a='pos')
        # Normalize so the weights form an affine (barycentric) combination.
        B[i, :] = w / np.sum(w)
    return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
    """Computes the barycenter weighted graph of k-Neighbors for points in X

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, cKDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.

    n_neighbors : int
        Number of neighbors for each sample.

    reg : float, optional
        Amount of regularization when solving the least-squares
        problem. Only relevant if mode='barycenter'. If None, use the
        default.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
        Each row sums to 1 (barycentric reconstruction weights).

    See also
    --------
    sklearn.neighbors.kneighbors_graph
    sklearn.neighbors.radius_neighbors_graph
    """
    # n_neighbors + 1 because each point is its own nearest neighbor and is
    # excluded below; _fit_X reuses the (possibly precomputed) fitted data.
    knn = NearestNeighbors(n_neighbors + 1).fit(X)
    X = knn._fit_X
    n_samples = X.shape[0]
    # Drop column 0 (the point itself) from each neighbor list.
    ind = knn.kneighbors(X, return_distance=False)[:, 1:]
    data = barycenter_weights(X, X[ind], reg=reg)
    # Rows have exactly n_neighbors entries each.
    indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    return csr_matrix((data.ravel(), ind.ravel(), indptr),
                      shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite

    k : integer
        Number of eigenvalues/vectors to return

    k_skip : integer, optional
        Number of low eigenvalues to skip.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.

    max_iter : maximum number of iterations for 'arpack' method
        not used if eigen_solver=='dense'

    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    Returns
    -------
    (vectors, error) : the k null-space eigenvectors as columns of an
        (n, k) array, and the sum of the corresponding eigenvalues.
    """
    if eigen_solver == 'auto':
        # Heuristic: shift-invert ARPACK pays off for large M and few vectors.
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'

    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        v0 = random_state.rand(M.shape[0])
        try:
            # sigma=0.0 targets the eigenvalues nearest zero (the null space).
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                tol=tol, maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved. method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)

        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            M = M.toarray()
        # Only the k smallest eigenpairs after the skipped ones are computed.
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
        X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
        max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
        random_state=None):
    """Perform a Locally Linear Embedding analysis on the data.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, cKDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold.
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition.  For this method, M must be an array
            or matrix type.  This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian : use the Hessian eigenmap method.  This method requires
                  n_neighbors > n_components * (1 + (n_components + 1) / 2.
                  see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa : use local tangent space alignment algorithm
               see reference [4]_
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'
    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.
    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)

    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)

    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
    nbrs.fit(X)
    X = nbrs._fit_X

    N, d_in = X.shape

    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("n_neighbors must be less than number of points")

    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")

    M_sparse = (eigen_solver != 'dense')

    if method == 'standard':
        W = barycenter_kneighbors_graph(
            nbrs, n_neighbors=n_neighbors, reg=reg)

        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add the identity: M = (I-W)'(I-W)

    elif method == 'hessian':
        # dp = number of distinct second-order monomials; it is used as an
        # array dimension, so use floor division (true division would yield
        # a float under Python 3 and break np.empty below).
        dp = n_components * (n_components + 1) // 2

        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")

        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        # np.float was an alias of the builtin float and was removed in
        # NumPy >= 1.24; using float directly is behavior-identical.
        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=float)
        Yi[:, 0] = 1

        M = np.zeros((N, N), dtype=float)

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)

            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]

            Yi[:, 1:1 + n_components] = U[:, :n_components]

            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
                                                 * U[:, k:n_components])
                j += n_components - k

            Q, R = qr(Yi)

            w = Q[:, n_components + 1:]
            S = w.sum(0)

            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")

        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])

        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)
        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs,
                                        full_matrices=True)
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]

        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve.
        # NOTE: this intentionally shadows the `reg` parameter; the modified
        # algorithm uses its own data-dependent regularization.
        reg = 1E-3 * evals.sum(1)
        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]

        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]

        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points. This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)

        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = np.cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues

        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=float)
        for i in range(N):
            s_i = s_range[i]

            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)

            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))

            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h

            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
                  + (1 - alpha_i) * w_reg[i, :, None])

            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        M = np.zeros((N, N))

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)

            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]

            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)

            GiGiT = np.dot(Gi, Gi.T)

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1

    return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
                      tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
    """Locally Linear Embedding

    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition.  For this method, M must be an array
            or matrix type.  This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
        Not used if eigen_solver=='dense'.
    method : string ('standard', 'hessian', 'modified' or 'ltsa')
        standard : use the standard locally linear embedding algorithm.  see
                   reference [1]
        hessian  : use the Hessian eigenmap method. This method requires
                   ``n_neighbors > n_components * (1 + (n_components + 1) / 2``
                   see reference [2]
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if ``method == 'hessian'``
    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if ``method == 'modified'``
    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    Attributes
    ----------
    `embedding_vectors_` : array-like, shape [n_components, n_samples]
        Stores the embedding vectors

    `reconstruction_error_` : float
        Reconstruction error associated with `embedding_vectors_`

    `nbrs_` : NearestNeighbors object
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """

    def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
                 eigen_solver='auto', tol=1E-6, max_iter=100,
                 method='standard', hessian_tol=1E-4, modified_tol=1E-12,
                 neighbors_algorithm='auto', random_state=None):
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.reg = reg
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.method = method
        self.hessian_tol = hessian_tol
        self.modified_tol = modified_tol
        self.random_state = random_state
        self.neighbors_algorithm = neighbors_algorithm

    def _fit_transform(self, X):
        self.nbrs_ = NearestNeighbors(self.n_neighbors,
                                      algorithm=self.neighbors_algorithm)

        random_state = check_random_state(self.random_state)
        X, = check_arrays(X, sparse_format='dense')
        self.nbrs_.fit(X)
        # BUG FIX: reg=self.reg was previously not forwarded, so the
        # constructor's `reg` was silently ignored during fit (only
        # transform() used it).
        self.embedding_, self.reconstruction_error_ = \
            locally_linear_embedding(
                self.nbrs_, self.n_neighbors, self.n_components,
                reg=self.reg, eigen_solver=self.eigen_solver, tol=self.tol,
                max_iter=self.max_iter, method=self.method,
                hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
                random_state=random_state)

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Compute the embedding vectors for data X and transform X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """
        Transform new points into embedding space.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        X_new : array, shape = [n_samples, n_components]

        Notes
        -----
        Because of scaling performed by this method, it is discouraged to use
        it together with methods that are not scale-invariant (like SVMs)
        """
        X = array2d(X)
        ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                    return_distance=False)
        weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
                                     reg=self.reg)
        X_new = np.empty((X.shape[0], self.n_components))
        # Each new point is the barycenter of its training-set neighbors,
        # expressed in the learned embedding coordinates.
        for i in range(X.shape[0]):
            X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
        return X_new
| bsd-3-clause |
haveal/googleads-dfa-reporting-samples | python/v2.1/get_reports.py | 3 | 2011 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to get a list of all reports."""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to list reports for')
def main(argv):
  """Authenticate, then page through and print every report for a profile."""
  # Authenticate and construct service.
  service, flags = sample_tools.init(
      argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
      scope=['https://www.googleapis.com/auth/dfareporting',
             'https://www.googleapis.com/auth/dfatrafficking'])
  profile_id = flags.profile_id
  try:
    # Construct the request.
    request = service.reports().list(profileId=profile_id)
    while True:
      # Execute request and print response.
      response = request.execute()
      for report in response['items']:
        print ('Found %s report with ID %s and name "%s".'
               % (report['type'], report['id'], report['name']))
      # BUG FIX: indexing response['nextPageToken'] directly raised KeyError
      # on the final page, where the API omits the key; .get() returns None
      # and the loop terminates cleanly instead.
      if response['items'] and response.get('nextPageToken'):
        request = service.reports().list_next(request, response)
      else:
        break
  except client.AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
dbrgn/pygments-mirror | pygments/filter.py | 365 | 2071 | # -*- coding: utf-8 -*-
"""
pygments.filter
~~~~~~~~~~~~~~~
Module that implements the default filter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def apply_filters(stream, filters, lexer=None):
    """
    Apply an iterable of filters to a token stream, returning the
    resulting (lazily evaluated) stream.  If `lexer` is given it is
    forwarded to each filter, otherwise the filters receive `None`.
    """
    def _pipe(flt, tokens):
        # Wrap one filter stage so that each stage consumes the previous
        # stage's output lazily.
        for token in flt.filter(lexer, tokens):
            yield token

    current = stream
    for flt in filters:
        current = _pipe(flt, current)
    return current
def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    namespace = {
        'function': f,
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__,
    }
    return type(f.__name__, (FunctionFilter,), namespace)
class Filter(object):
    """
    Base class for token-stream filters.  Subclass this (or use the
    `simplefilter` decorator) to create your own filters; keyword
    options passed to the constructor are stored on ``self.options``.
    """

    def __init__(self, **options):
        self.options = options

    def filter(self, lexer, stream):
        # Subclasses must override this to yield (tokentype, value) pairs.
        raise NotImplementedError()
class FunctionFilter(Filter):
    """
    Abstract class used by `simplefilter` to create simple
    function filters on the fly.  The `simplefilter` decorator
    automatically creates subclasses of this class for
    functions passed to it.
    """
    function = None

    def __init__(self, **options):
        # Guard against direct instantiation: only subclasses generated by
        # `simplefilter` carry a bound `function` attribute.
        if not hasattr(self, 'function'):
            raise TypeError('%r used without bound function' %
                            self.__class__.__name__)
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # pylint: disable-msg=E1102
        for tok_type, tok_value in self.function(lexer, stream, self.options):
            yield tok_type, tok_value
| bsd-2-clause |
hermaa02/cs330-final | db.py | 1 | 5927 | from flask.ext.sqlalchemy import SQLAlchemy
from flask import Flask
import os
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://jkrbnkekcosnxb:jOrPwurv94mnMu7acVpIX0W2sA@ec2-54-197-230-161.compute-1.amazonaws.com:5432/da79ahfav90021'
db = SQLAlchemy(app)
class Item(db.Model):
    """Catalog entry for a sellable art piece: name, price, and dimensions."""
    __tablename__ = "Item"
    item_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    # NOTE(review): price is stored as a string (max 6 chars); a Numeric
    # column would allow arithmetic/validation — confirm before changing.
    price = db.Column(db.String(6))
    size_w = db.Column(db.Integer)  # width  — presumably inches; TODO confirm
    size_h = db.Column(db.Integer)  # height — presumably inches; TODO confirm
    def __init__(self,name,price,size_w,size_h):
        # item_id is left to the database (auto-assigned primary key).
        self.name = name
        self.price = price
        self.size_w = size_w
        self.size_h = size_h
#Item(item_id=,name=,price=,size_w=,size_h)
class Description(db.Model):
    """Per-item description, image filename, and colour-tag flags.

    NOTE(review): item_id mirrors Item's primary key but declares no
    ForeignKey; the 1:1 link to Item is by convention only — confirm intent.
    """
    __tablename__ = "Description"
    item_id = db.Column(db.Integer, primary_key=True)
    description = db.Column(db.String(250))
    img = db.Column(db.String(250))  # image filename, e.g. 'ht_1.jpg'
    # Boolean colour tags (used by the seed data to mark dominant colours).
    blue = db.Column(db.Boolean,default=False,nullable=False)
    red = db.Column(db.Boolean,default=False,nullable=False)
    orange = db.Column(db.Boolean,default=False,nullable=False)
    yellow = db.Column(db.Boolean,default=False,nullable=False)
    purple = db.Column(db.Boolean,default=False,nullable=False)
    green = db.Column(db.Boolean,default=False,nullable=False)
    def __init__(self,description,img,blue,red,orange,yellow,purple,green):
        self.description = description
        self.img = img
        self.blue = blue
        self.red = red
        self.orange = orange
        self.yellow = yellow
        self.purple = purple
        self.green = green
#Description(item_id= , description = , img = , blue = , red = , orange = , yellow =, purple = , green =)
class History(db.Model):
    """Stock tracking for an item: units sold vs. units still available."""
    __tablename__ = "History"
    item_id = db.Column(db.Integer,primary_key=True)
    sold = db.Column(db.Integer)
    available = db.Column(db.Integer)
    def __init__(self,sold,available):
        self.sold = sold
        self.available = available
#History(item_id=,sold=,available=)
class Contact(db.Model):
    """Message submitted through the site's contact form.

    NOTE(review): the primary key is named item_id but appears to be just a
    row id for the message, unrelated to Item — confirm.
    """
    __tablename__ = "Contact"
    item_id = db.Column(db.Integer,primary_key=True)
    name = db.Column(db.String(100))
    email = db.Column(db.String(100))
    message = db.Column(db.String(1000))
    phone = db.Column(db.String(10))  # 10-character phone number string
    def __init__(self,name,email,message,phone):
        self.name = name
        self.email = email
        self.message = message
        self.phone = phone
#db.create_all()
'''
item1 = Item(name = 'circle canvas',price = 175,size_w = 24,size_h=24)
descrip1 = Description(description = 'circular canvas, with deer and red.', img = 'ht_1.jpg', blue = False, red = True, orange = True, yellow = True, purple = False, green = False)
history1 = History(sold=0,available=1)
item2 = Item(name = 'square canvas',price = 150,size_w = 24,size_h=24)
descrip2 = Description(description = 'square canvas with human and abstract art', img = 'ht_2.jpg', blue = True, red = True, orange = True, yellow =False, purple = False, green =True)
history2 = History(sold=0,available=1)
item3 = Item(name = 'square canvas',price = 150,size_w = 24,size_h=24)
descrip3 = Description( description = 'square canvas with abstract painting', img = 'ht_3.jpg', blue = False, red = True, orange = True, yellow =True, purple = False, green =True)
history3 = History(sold=0,available=1)
item4 = Item(name = 'circle canvas',price = 175,size_w = 24,size_h=24)
descrip4 = Description(description = 'circular canvas with ', img = 'ht_4.jpg', blue = True, red = True, orange =True, yellow =True, purple = False, green =False)
history4 = History(sold=0,available=1)
item5 = Item(name = 'square canvas',price = 180,size_w = 24,size_h=24)
descrip5 = Description(description = 'abstract square canvas with blue trees, lines', img = 'ht_5.jpg', blue =True, red =True, orange =True, yellow =True, purple =False, green =True)
history5 = History(sold=0,available=1)
item6 = Item(name = 'square canvas',price = 150,size_w = 24,size_h=24)
descrip6 = Description(description ='square canvas with valley and pine trees', img = 'ht_6.jpg', blue = True, red =False, orange =True, yellow =True, purple =False, green =False)
history6 = History(sold=0,available=1)
item7 = Item(name = 'rectangle drawing',price = 10,size_w = 4,size_h=6)
descrip7 = Description(description = 'tree drawing', img = 'ht_7.jpg', blue = False, red = False, orange = False, yellow =False, purple = False, green =True)
history7 = History(sold=0,available=1)
item8 = Item(name = 'square drawing',price = 10,size_w = 5,size_h=5)
descrip8 = Description( description = 'tree drawing', img = 'ht_8.jpg', blue = False, red = False, orange = False, yellow =False, purple = False, green =True)
history8 = History(sold=0,available=1)
item9 = Item(name = 'square drawing',price = 10,size_w = 5,size_h=5)
descrip9 = Description(description = 'tree drawing', img = 'ht_9.jpg', blue = False, red = False, orange = False, yellow =False, purple = False, green =True)
history9 = History(sold=0,available=1)
db.session.add(item1)
db.session.add(descrip1)
db.session.add(history1)
db.session.add(item2)
db.session.add(descrip2)
db.session.add(history2)
db.session.add(item3)
db.session.add(descrip3)
db.session.add(history3)
db.session.add(item4)
db.session.add(descrip4)
db.session.add(history4)
db.session.add(item5)
db.session.add(descrip5)
db.session.add(history5)
db.session.add(item6)
db.session.add(descrip6)
db.session.add(history6)
db.session.add(item7)
db.session.add(descrip7)
db.session.add(history7)
db.session.add(item8)
db.session.add(descrip8)
db.session.add(history8)
db.session.add(item9)
db.session.add(descrip9)
db.session.add(history9)
db.session.commit()
'''
# Module-level smoke test: prints every Item's price.
# NOTE(review): this runs on *import* of the module; it should probably be
# guarded with `if __name__ == '__main__':` — confirm before relying on it.
qu = Item.query.all()
for item in qu:
    print(item.price)
'''
num_deleted = db.session.query(Item).delete()
db.session.commit()
print(num_deleted)
qu = Item.query.all()
for item in qu:
print(item.price)
''' | apache-2.0 |
gfcapalbo/website | website_hr_contact/__openerp__.py | 3 | 1511 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of website_hr_contact,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# website_hr_contact is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# website_hr_contact is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with website_hr_contact.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "hR Address Book",
'summary': """
Display your hr address book in your website""",
'author': 'ACSONE SA/NV,'
'Odoo Community Association (OCA)',
'website': "http://acsone.eu",
'category': 'Website',
'version': '8.0.1.0.0',
'license': 'AGPL-3',
'depends': [
'website_hr',
],
'data': [
'views/website_hr_contact_templates.xml',
'data/website_hr_contact_data.xml',
],
}
| agpl-3.0 |
kamarush/android_kernel_sony_yuga_lp | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
	"""Resolve an address to (symbol_name, offset_into_symbol).

	kallsyms is sorted ascending by address; the symbol containing an
	address is the *last* entry whose start is <= the address.  The
	original returned the first symbol at or *above* the address with
	the distance to it, which attributed drops to the wrong function.
	Returns (None, 0) when the address is below every known symbol.
	"""
	loc = int(sloc)
	best = None
	for entry in kallsyms:
		if entry['loc'] > loc:
			break
		best = entry
	if best is None:
		return (None, 0)
	return (best['name'], loc - best['loc'])
def print_drop_table():
	# Render the accumulated drop_log as a three-column table, resolving
	# each raw drop location to a kernel symbol when possible.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			# no symbol table entry: fall back to the raw address string
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf calls this hook once, before any events are delivered.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf calls this hook after the last event: build the symbol table
	# once, then dump the aggregated drop counts.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf when it finds a corresponding kfree_skb tracepoint event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, protocol, location):
	# Count one dropped skb against its kernel call-site address.
	slocation = str(location)
	# dict.get() replaces the original bare try/except increment, which
	# also swallowed unrelated exceptions (e.g. KeyboardInterrupt).
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
imuse2012/grit-i18n | grit/gather/tr_html.py | 61 | 27320 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A gatherer for the TotalRecall brand of HTML templates with replaceable
portions. We wanted to reuse extern.tclib.api.handlers.html.TCHTMLParser
but this proved impossible due to the fact that the TotalRecall HTML templates
are in general quite far from parseable HTML and the TCHTMLParser derives
from HTMLParser.HTMLParser which requires relatively well-formed HTML. Some
examples of "HTML" from the TotalRecall HTML templates that wouldn't be
parseable include things like:
<a [PARAMS]>blabla</a> (not parseable because attributes are invalid)
<table><tr><td>[LOTSOFSTUFF]</tr></table> (not parseable because closing
</td> is in the HTML [LOTSOFSTUFF]
is replaced by)
The other problem with using general parsers (such as TCHTMLParser) is that
we want to make sure we output the TotalRecall template with as little changes
as possible in terms of whitespace characters, layout etc. With any parser
that generates a parse tree, and generates output by dumping the parse tree,
we would always have little inconsistencies which could cause bugs (the
TotalRecall template stuff is quite brittle and can break if e.g. a tab
character is replaced with spaces).
The solution, which may be applicable to some other HTML-like template
languages floating around Google, is to create a parser with a simple state
machine that keeps track of what kind of tag it's inside, and whether it's in
a translateable section or not. Translateable sections are:
a) text (including [BINGO] replaceables) inside of tags that
can contain translateable text (which is all tags except
for a few)
b) text inside of an 'alt' attribute in an <image> element, or
the 'value' attribute of a <submit>, <button> or <text>
element.
The parser does not build up a parse tree but rather a "skeleton" which
is a list of nontranslateable strings intermingled with grit.clique.MessageClique
objects. This simplifies the parser considerably compared to a regular HTML
parser. To output a translated document, each item in the skeleton is
printed out, with the relevant Translation from each MessageCliques being used
for the requested language.
This implementation borrows some code, constants and ideas from
extern.tclib.api.handlers.html.TCHTMLParser.
'''
import re
import types
from grit import clique
from grit import exception
from grit import lazy_re
from grit import util
from grit import tclib
from grit.gather import interface
# HTML tags which break (separate) chunks.
_BLOCK_TAGS = ['script', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'br',
'body', 'style', 'head', 'title', 'table', 'tr', 'td', 'th',
'ul', 'ol', 'dl', 'nl', 'li', 'div', 'object', 'center',
'html', 'link', 'form', 'select', 'textarea',
'button', 'option', 'map', 'area', 'blockquote', 'pre',
'meta', 'xmp', 'noscript', 'label', 'tbody', 'thead',
'script', 'style', 'pre', 'iframe', 'img', 'input', 'nowrap',
'fieldset', 'legend']
# HTML tags which may appear within a chunk.
_INLINE_TAGS = ['b', 'i', 'u', 'tt', 'code', 'font', 'a', 'span', 'small',
'key', 'nobr', 'url', 'em', 's', 'sup', 'strike',
'strong']
# HTML tags within which linebreaks are significant.
_PREFORMATTED_TAGS = ['textarea', 'xmp', 'pre']
# An array mapping some of the inline HTML tags to more meaningful
# names for those tags. This will be used when generating placeholders
# representing these tags.
_HTML_PLACEHOLDER_NAMES = { 'a' : 'link', 'br' : 'break', 'b' : 'bold',
'i' : 'italic', 'li' : 'item', 'ol' : 'ordered_list', 'p' : 'paragraph',
'ul' : 'unordered_list', 'img' : 'image', 'em' : 'emphasis' }
# We append each of these characters in sequence to distinguish between
# different placeholders with basically the same name (e.g. BOLD1, BOLD2).
# Keep in mind that a placeholder name must not be a substring of any other
# placeholder name in the same message, so we can't simply count (BOLD_1
# would be a substring of BOLD_10).
_SUFFIXES = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Matches whitespace in an HTML document. Also matches HTML comments, which are
# treated as whitespace.
_WHITESPACE = lazy_re.compile(r'(\s| |\\n|\\r|<!--\s*desc\s*=.*?-->)+',
re.DOTALL)
# Matches whitespace sequences which can be folded into a single whitespace
# character. This matches single characters so that non-spaces are replaced
# with spaces.
_FOLD_WHITESPACE = lazy_re.compile(r'\s+')
# Finds a non-whitespace character
_NON_WHITESPACE = lazy_re.compile(r'\S')
# Matches two or more in a row (a single   is not changed into
# placeholders because different languages require different numbers of spaces
# and placeholders must match exactly; more than one is probably a "special"
# whitespace sequence and should be turned into a placeholder).
_NBSP = lazy_re.compile(r' ( )+')
# Matches nontranslateable chunks of the document
_NONTRANSLATEABLES = lazy_re.compile(r'''
<\s*script.+?<\s*/\s*script\s*>
|
<\s*style.+?<\s*/\s*style\s*>
|
<!--.+?-->
|
<\?IMPORT\s.+?> # import tag
|
<\s*[a-zA-Z_]+:.+?> # custom tag (open)
|
<\s*/\s*[a-zA-Z_]+:.+?> # custom tag (close)
|
<!\s*[A-Z]+\s*([^>]+|"[^"]+"|'[^']+')*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches a tag and its attributes
_ELEMENT = lazy_re.compile(r'''
# Optional closing /, element name
<\s*(?P<closing>/)?\s*(?P<element>[a-zA-Z0-9]+)\s*
# Attributes and/or replaceables inside the tag, if any
(?P<atts>(
\s*([a-zA-Z_][-:.a-zA-Z_0-9]*) # Attribute name
(\s*=\s*(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?
|
\s*\[(\$?\~)?([A-Z0-9-_]+?)(\~\$?)?\]
)*)
\s*(?P<empty>/)?\s*> # Optional empty-tag closing /, and tag close
''',
re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches elements that may have translateable attributes. The value of these
# special attributes is given by group 'value1' or 'value2'. Note that this
# regexp demands that the attribute value be quoted; this is necessary because
# the non-tree-building nature of the parser means we don't know when we're
# writing out attributes, so we wouldn't know to escape spaces.
_SPECIAL_ELEMENT = lazy_re.compile(r'''
<\s*(
input[^>]+?value\s*=\s*(\'(?P<value3>[^\']*)\'|"(?P<value4>[^"]*)")
[^>]+type\s*=\s*"?'?(button|reset|text|submit)'?"?
|
(
table[^>]+?title\s*=
|
img[^>]+?alt\s*=
|
input[^>]+?type\s*=\s*"?'?(button|reset|text|submit)'?"?[^>]+?value\s*=
)
\s*(\'(?P<value1>[^\']*)\'|"(?P<value2>[^"]*)")
)[^>]*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches stuff that is translateable if it occurs in the right context
# (between tags). This includes all characters and character entities.
# Note that this also matches which needs to be handled as whitespace
# before this regexp is applied.
_CHARACTERS = lazy_re.compile(r'''
(
\w
|
[\!\@\#\$\%\^\*\(\)\-\=\_\+\[\]\{\}\\\|\;\:\'\"\,\.\/\?\`\~]
|
&(\#[0-9]+|\#x[0-9a-fA-F]+|[A-Za-z0-9]+);
)+
''', re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches Total Recall's "replaceable" tags, which are just any text
# in capitals enclosed by delimiters like [] or [~~] or [$~~$] (e.g. [HELLO],
# [~HELLO~] and [$~HELLO~$]).
_REPLACEABLE = lazy_re.compile(r'\[(\$?\~)?(?P<name>[A-Z0-9-_]+?)(\~\$?)?\]',
re.MULTILINE)
# Matches the silly [!]-prefixed "header" that is used in some TotalRecall
# templates.
_SILLY_HEADER = lazy_re.compile(r'\[!\]\ntitle\t(?P<title>[^\n]+?)\n.+?\n\n',
re.MULTILINE | re.DOTALL)
# Matches a comment that provides a description for the message it occurs in.
_DESCRIPTION_COMMENT = lazy_re.compile(
r'<!--\s*desc\s*=\s*(?P<description>.+?)\s*-->', re.DOTALL)
# Matches a comment which is used to break apart multiple messages.
_MESSAGE_BREAK_COMMENT = lazy_re.compile(r'<!--\s*message-break\s*-->',
re.DOTALL)
# Matches a comment which is used to prevent block tags from splitting a message
_MESSAGE_NO_BREAK_COMMENT = re.compile(r'<!--\s*message-no-break\s*-->',
re.DOTALL)
_DEBUG = 0
def _DebugPrint(text):
  '''Prints 'text' (UTF-8 encoded) to stdout, but only when _DEBUG is set.'''
  if _DEBUG:
    print text.encode('utf-8')
class HtmlChunks(object):
  '''A parser that knows how to break an HTML-like document into a list of
  chunks, where each chunk is either translateable or non-translateable.
  The chunks are unmodified sections of the original document, so concatenating
  the text of all chunks would result in the original document.

  The chunker is a single-pass state machine over the raw text.  Its state
  (see Parse) is a set of character indices; invariants:
    - chunk_start .. current-1 is the chunk currently being accumulated;
    - exactly one of last_translateable / last_nontranslateable is != -1,
      indicating which kind of chunk we are in and where it provably ends
      (trailing whitespace is neutral and excluded until proven otherwise).
  '''

  def InTranslateable(self):
    # True iff we are currently accumulating a translateable chunk.
    return self.last_translateable != -1

  def Rest(self):
    # The not-yet-consumed tail of the document; all regexes below are
    # matched against this, so their offsets are relative to self.current.
    return self.text_[self.current:]

  def StartTranslateable(self):
    assert not self.InTranslateable()
    if self.current != 0:
      # Append a nontranslateable chunk
      chunk_text = self.text_[self.chunk_start : self.last_nontranslateable + 1]
      # Needed in the case where document starts with a translateable.
      if len(chunk_text) > 0:
        self.AddChunk(False, chunk_text)
    self.chunk_start = self.last_nontranslateable + 1
    self.last_translateable = self.current
    self.last_nontranslateable = -1

  def EndTranslateable(self):
    assert self.InTranslateable()
    # Append a translateable chunk
    self.AddChunk(True,
                  self.text_[self.chunk_start : self.last_translateable + 1])
    self.chunk_start = self.last_translateable + 1
    self.last_translateable = -1
    self.last_nontranslateable = self.current

  def AdvancePast(self, match):
    # Consume the matched text; 'match' was produced against Rest(), so its
    # end() is an offset from self.current.
    self.current += match.end()

  def AddChunk(self, translateable, text):
    '''Adds a chunk to self, removing linebreaks and duplicate whitespace
    if appropriate.

    Also extracts any <!-- desc=... --> description comment (remembered for
    the next translateable chunk) and strips message-break comments.
    '''
    m = _DESCRIPTION_COMMENT.search(text)
    if m:
      self.last_description = m.group('description')
      # Remove the description from the output text
      text = _DESCRIPTION_COMMENT.sub('', text)
    m = _MESSAGE_BREAK_COMMENT.search(text)
    if m:
      # Remove the comment from the output text.  It should already effectively
      # break apart messages.
      text = _MESSAGE_BREAK_COMMENT.sub('', text)
    # Whitespace is only normalized for translateable text outside of
    # preformatted (<pre>/<textarea>-style) elements.
    if translateable and not self.last_element_ in _PREFORMATTED_TAGS:
      if self.fold_whitespace_:
        # Fold whitespace sequences if appropriate. This is optional because it
        # alters the output strings.
        text = _FOLD_WHITESPACE.sub(' ', text)
      else:
        text = text.replace('\n', ' ')
        text = text.replace('\r', ' ')
        # This whitespace folding doesn't work in all cases, thus the
        # fold_whitespace flag to support backwards compatibility.
        # NOTE(review): the two replace() calls below look garbled here (the
        # search and replacement arguments appear identical); upstream they
        # collapse runs of multiple spaces into one -- confirm against the
        # original source before relying on this code path.
        text = text.replace(' ', ' ')
        text = text.replace(' ', ' ')
    if translateable:
      # Consume the pending description so it only applies to one message.
      description = self.last_description
      self.last_description = ''
    else:
      description = ''
    if text != '':
      self.chunks_.append((translateable, text, description))

  def Parse(self, text, fold_whitespace):
    '''Parses self.text_ into an intermediate format stored in self.chunks_
    which is translateable and nontranslateable chunks.  Also returns
    self.chunks_

    Args:
      text: The HTML for parsing.
      fold_whitespace: Whether whitespace sequences should be folded into a
        single space.

    Return:
      [chunk1, chunk2, chunk3, ...] (instances of class Chunk)
    '''
    #
    # Chunker state
    #
    self.text_ = text
    self.fold_whitespace_ = fold_whitespace
    # A list of tuples (is_translateable, text) which represents the document
    # after chunking.
    self.chunks_ = []
    # Start index of the last chunk, whether translateable or not
    self.chunk_start = 0
    # Index of the last for-sure translateable character if we are parsing
    # a translateable chunk, -1 to indicate we are not in a translateable chunk.
    # This is needed so that we don't include trailing whitespace in the
    # translateable chunk (whitespace is neutral).
    self.last_translateable = -1
    # Index of the last for-sure nontranslateable character if we are parsing
    # a nontranslateable chunk, -1 if we are not in a nontranslateable chunk.
    # This is needed to make sure we can group e.g. "<b>Hello</b> there"
    # together instead of just "Hello</b> there" which would be much worse
    # for translation.
    self.last_nontranslateable = -1
    # Index of the character we're currently looking at.
    self.current = 0
    # The name of the last block element parsed.
    self.last_element_ = ''
    # The last explicit description we found.
    self.last_description = ''
    # Whether no-break was the last chunk seen
    self.last_nobreak = False

    while self.current < len(self.text_):
      _DebugPrint('REST: %s' % self.text_[self.current:self.current+60])

      # A message-no-break comment suppresses the chunk split that the next
      # block tag would otherwise cause.
      m = _MESSAGE_NO_BREAK_COMMENT.match(self.Rest())
      if m:
        self.AdvancePast(m)
        self.last_nobreak = True
        continue

      # Try to match whitespace
      m = _WHITESPACE.match(self.Rest())
      if m:
        # Whitespace is neutral, it just advances 'current' and does not switch
        # between translateable/nontranslateable. If we are in a
        # nontranslateable section that extends to the current point, we extend
        # it to include the whitespace. If we are in a translateable section,
        # we do not extend it until we find
        # more translateable parts, because we never want a translateable chunk
        # to end with whitespace.
        if (not self.InTranslateable() and
            self.last_nontranslateable == self.current - 1):
          self.last_nontranslateable = self.current + m.end() - 1
        self.AdvancePast(m)
        continue

      # Then we try to match nontranslateables
      m = _NONTRANSLATEABLES.match(self.Rest())
      if m:
        if self.InTranslateable():
          self.EndTranslateable()
        self.last_nontranslateable = self.current + m.end() - 1
        self.AdvancePast(m)
        continue

      # Now match all other HTML element tags (opening, closing, or empty, we
      # don't care).
      m = _ELEMENT.match(self.Rest())
      if m:
        element_name = m.group('element').lower()
        if element_name in _BLOCK_TAGS:
          self.last_element_ = element_name
          if self.InTranslateable():
            if self.last_nobreak:
              self.last_nobreak = False
            else:
              self.EndTranslateable()

          # Check for "special" elements, i.e. ones that have a translateable
          # attribute, and handle them correctly.  Note that all of the
          # "special" elements are block tags, so no need to check for this
          # if the tag is not a block tag.
          sm = _SPECIAL_ELEMENT.match(self.Rest())
          if sm:
            # Get the appropriate group name
            for group in sm.groupdict().keys():
              if sm.groupdict()[group]:
                break

            # First make a nontranslateable chunk up to and including the
            # quote before the translateable attribute value
            self.AddChunk(False, self.text_[
              self.chunk_start : self.current + sm.start(group)])
            # Then a translateable for the translateable bit
            self.AddChunk(True, self.Rest()[sm.start(group) : sm.end(group)])
            # Finally correct the data invariant for the parser
            self.chunk_start = self.current + sm.end(group)

          self.last_nontranslateable = self.current + m.end() - 1
        elif self.InTranslateable():
          # We're in a translateable and the tag is an inline tag, so we
          # need to include it in the translateable.
          self.last_translateable = self.current + m.end() - 1
        self.AdvancePast(m)
        continue

      # Anything else we find must be translateable, so we advance one character
      # at a time until one of the above matches.
      if not self.InTranslateable():
        self.StartTranslateable()
      else:
        self.last_translateable = self.current
      self.current += 1

    # Close the final chunk
    if self.InTranslateable():
      self.AddChunk(True, self.text_[self.chunk_start : ])
    else:
      self.AddChunk(False, self.text_[self.chunk_start : ])

    return self.chunks_
def HtmlToMessage(html, include_block_tags=False, description=''):
  '''Takes a bit of HTML, which must contain only "inline" HTML elements,
  and changes it into a tclib.Message. This involves escaping any entities and
  replacing any HTML code with placeholders.

  If include_block_tags is true, no error will be given if block tags (e.g.
  <p> or <br>) are included in the HTML.

  Args:
    html: 'Hello <b>[USERNAME]</b>, how <i>are</i> you?'
    include_block_tags: False
    description: Optional description attached to the resulting Message.

  Return:
    tclib.Message('Hello START_BOLD1USERNAMEEND_BOLD, '
                  'howNBSPSTART_ITALICareEND_ITALIC you?',
                  [ Placeholder('START_BOLD', '<b>', ''),
                    Placeholder('USERNAME', '[USERNAME]', ''),
                    Placeholder('END_BOLD', '</b>', ''),
                    Placeholder('START_ITALIC', '<i>', ''),
                    Placeholder('END_ITALIC', '</i>', ''), ])

  Raises:
    exception.BlockTagInTranslateableChunk: if a block tag appears and
      include_block_tags is false (unless preceded by message-no-break).
  '''
  # Approach is:
  # - first placeholderize, finding <elements>, [REPLACEABLES] and &nbsp;
  # - then escape all character entities in text in-between placeholders
  parts = []  # List of strings (for text chunks) and tuples (ID, original)
              # for placeholders
  count_names = {}  # Map of base names to number of times used
  end_names = {}  # Map of base names to stack of end tags (for correct nesting)

  def MakeNameClosure(base, type = ''):
    '''Returns a closure that can be called once all names have been allocated
    to return the final name of the placeholder.  This allows us to minimally
    number placeholders for non-overlap.

    Also ensures that END_XXX_Y placeholders have the same Y as the
    corresponding BEGIN_XXX_Y placeholder when we have nested tags of the same
    type.

    Args:
      base: 'phname'
      type: '' | 'begin' | 'end'

    Return:
      Closure()
    '''
    name = base.upper()
    if type != '':
      name = ('%s_%s' % (type, base)).upper()

    if name in count_names.keys():
      count_names[name] += 1
    else:
      count_names[name] = 1

    # The default arguments freeze the current 'name' and per-name index at
    # closure-creation time; the body runs later, after all placeholders in
    # the message have been counted.
    def MakeFinalName(name_ = name, index = count_names[name] - 1):
      if (type.lower() == 'end' and
          base in end_names.keys() and len(end_names[base])):
        return end_names[base].pop(-1)  # For correct nesting
      if count_names[name_] != 1:
        name_ = '%s_%s' % (name_, _SUFFIXES[index])
        # We need to use a stack to ensure that the end-tag suffixes match
        # the begin-tag suffixes.  Only needed when more than one tag of the
        # same type.
        if type == 'begin':
          end_name = ('END_%s_%s' % (base, _SUFFIXES[index])).upper()
          if base in end_names.keys():
            end_names[base].append(end_name)
          else:
            end_names[base] = [end_name]
      return name_

    return MakeFinalName

  current = 0
  last_nobreak = False

  while current < len(html):
    m = _MESSAGE_NO_BREAK_COMMENT.match(html[current:])
    if m:
      last_nobreak = True
      current += m.end()
      continue

    m = _NBSP.match(html[current:])
    if m:
      parts.append((MakeNameClosure('SPACE'), m.group()))
      current += m.end()
      continue

    m = _REPLACEABLE.match(html[current:])
    if m:
      # Replaceables allow - but placeholders don't, so replace - with _
      ph_name = MakeNameClosure('X_%s_X' % m.group('name').replace('-', '_'))
      parts.append((ph_name, m.group()))
      current += m.end()
      continue

    m = _SPECIAL_ELEMENT.match(html[current:])
    if m:
      if not include_block_tags:
        if last_nobreak:
          last_nobreak = False
        else:
          raise exception.BlockTagInTranslateableChunk(html)
      element_name = 'block'  # for simplification
      # Get the appropriate group name
      for group in m.groupdict().keys():
        if m.groupdict()[group]:
          break
      # Split the special element into begin-placeholder / translateable
      # attribute value / end-placeholder.
      parts.append((MakeNameClosure(element_name, 'begin'),
                    html[current : current + m.start(group)]))
      parts.append(m.group(group))
      parts.append((MakeNameClosure(element_name, 'end'),
                    html[current + m.end(group) : current + m.end()]))
      current += m.end()
      continue

    m = _ELEMENT.match(html[current:])
    if m:
      element_name = m.group('element').lower()
      if not include_block_tags and not element_name in _INLINE_TAGS:
        if last_nobreak:
          last_nobreak = False
        else:
          raise exception.BlockTagInTranslateableChunk(html[current:])
      if element_name in _HTML_PLACEHOLDER_NAMES:  # use meaningful names
        element_name = _HTML_PLACEHOLDER_NAMES[element_name]

      # Make a name for the placeholder
      type = ''
      if not m.group('empty'):
        if m.group('closing'):
          type = 'end'
        else:
          type = 'begin'
      parts.append((MakeNameClosure(element_name, type), m.group()))
      current += m.end()
      continue

    # Plain text: accumulate a character at a time, merging with a preceding
    # text part when possible.
    if len(parts) and isinstance(parts[-1], types.StringTypes):
      parts[-1] += html[current]
    else:
      parts.append(html[current])
    current += 1

  # Now that all placeholders are counted, resolve the final names and build
  # the message text + placeholder list.
  msg_text = ''
  placeholders = []
  for part in parts:
    if isinstance(part, types.TupleType):
      final_name = part[0]()
      original = part[1]
      msg_text += final_name
      placeholders.append(tclib.Placeholder(final_name, original, '(HTML code)'))
    else:
      msg_text += part

  msg = tclib.Message(text=msg_text, placeholders=placeholders,
                      description=description)
  content = msg.GetContent()
  # Unescape entities in the plain-text runs (placeholders keep their
  # original HTML verbatim).
  for ix in range(len(content)):
    if isinstance(content[ix], types.StringTypes):
      content[ix] = util.UnescapeHtml(content[ix], replace_nbsp=False)

  return msg
class TrHtml(interface.GathererBase):
  '''Represents a document or message in the template format used by
  Total Recall for HTML documents.

  Parse() splits the document into a skeleton_ list that interleaves raw
  markup strings with MessageClique objects for the translateable parts;
  Translate() reassembles the document for a target language.
  '''

  def __init__(self, *args, **kwargs):
    super(TrHtml, self).__init__(*args, **kwargs)
    self.have_parsed_ = False
    self.skeleton_ = []  # list of strings and MessageClique objects
    self.fold_whitespace_ = False

  def SetAttributes(self, attrs):
    '''Sets node attributes used by the gatherer.

    This checks the fold_whitespace attribute.

    Args:
      attrs: The mapping of node attributes.
    '''
    self.fold_whitespace_ = ('fold_whitespace' in attrs and
                             attrs['fold_whitespace'] == 'true')

  def GetText(self):
    '''Returns the original text of the HTML document'''
    return self.text_

  def GetTextualIds(self):
    '''Returns the list of textual IDs for this gatherer (just the extkey).'''
    return [self.extkey]

  def GetCliques(self):
    '''Returns the message cliques for each translateable message in the
    document.'''
    return [x for x in self.skeleton_ if isinstance(x, clique.MessageClique)]

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    '''Returns this document with translateable messages filled with
    the translation for language 'lang'.

    Args:
      lang: 'en'
      pseudo_if_not_available: True

    Return:
      'ID_THIS_SECTION TYPE\n...BEGIN\n  "Translated message"\n......\nEND

    Raises:
      grit.exception.NotReady() if used before Parse() has been successfully
      called.
      grit.exception.NoSuchTranslation() if 'pseudo_if_not_available' is false
      and there is no translation for the requested language.
    '''
    if len(self.skeleton_) == 0:
      raise exception.NotReady()

    # TODO(joi) Implement support for skeleton gatherers here.

    out = []
    for item in self.skeleton_:
      if isinstance(item, types.StringTypes):
        # Raw markup passes through untouched.
        out.append(item)
      else:
        msg = item.MessageForLanguage(lang,
                                      pseudo_if_not_available,
                                      fallback_to_english)
        for content in msg.GetContent():
          if isinstance(content, tclib.Placeholder):
            out.append(content.GetOriginal())
          else:
            # We escape " characters to increase the chance that attributes
            # will be properly escaped.
            out.append(util.EscapeHtml(content, True))

    return ''.join(out)

  def Parse(self):
    # Idempotent: parsing twice would duplicate the skeleton.
    if self.have_parsed_:
      return
    self.have_parsed_ = True

    text = self._LoadInputFile()

    # Ignore the BOM character if the document starts with one.
    if text.startswith(u'\ufeff'):
      text = text[1:]

    self.text_ = text

    # Parsing is done in two phases:  First, we break the document into
    # translateable and nontranslateable chunks.  Second, we run through each
    # translateable chunk and insert placeholders for any HTML elements,
    # unescape escaped characters, etc.

    # First handle the silly little [!]-prefixed header because it's not
    # handled by our HTML parsers.
    m = _SILLY_HEADER.match(text)
    if m:
      self.skeleton_.append(text[:m.start('title')])
      self.skeleton_.append(self.uberclique.MakeClique(
        tclib.Message(text=text[m.start('title'):m.end('title')])))
      self.skeleton_.append(text[m.end('title') : m.end()])
      text = text[m.end():]

    chunks = HtmlChunks().Parse(text, self.fold_whitespace_)

    for chunk in chunks:
      if chunk[0]:  # Chunk is translateable
        self.skeleton_.append(self.uberclique.MakeClique(
          HtmlToMessage(chunk[1], description=chunk[2])))
      else:
        self.skeleton_.append(chunk[1])

    # Go through the skeleton and change any messages that consist solely of
    # placeholders and whitespace into nontranslateable strings.
    for ix in range(len(self.skeleton_)):
      got_text = False
      if isinstance(self.skeleton_[ix], clique.MessageClique):
        msg = self.skeleton_[ix].GetMessage()
        for item in msg.GetContent():
          if (isinstance(item, types.StringTypes) and _NON_WHITESPACE.search(item)
              and item != ' '):
            got_text = True
            break
        if not got_text:
          self.skeleton_[ix] = msg.GetRealContent()

  def SubstituteMessages(self, substituter):
    '''Applies substitutions to all messages in the tree.

    Goes through the skeleton and finds all MessageCliques.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    new_skel = []
    for chunk in self.skeleton_:
      if isinstance(chunk, clique.MessageClique):
        old_message = chunk.GetMessage()
        new_message = substituter.SubstituteMessage(old_message)
        if new_message is not old_message:
          new_skel.append(self.uberclique.MakeClique(new_message))
          continue
      new_skel.append(chunk)
    self.skeleton_ = new_skel
| bsd-2-clause |
HtmlUnit/selenium | py/test/selenium/webdriver/common/interactions_tests.py | 63 | 10376 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for advanced user interactions."""
import unittest
import pytest
import sys
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
class AdvancedUserInteractionTest(unittest.TestCase):
    """Advanced user interaction (ActionChains) tests.

    Ported from the org.openqa.selenium.interactions.* Java test suites.
    The test harness is expected to attach ``self.driver`` (a WebDriver
    instance) and ``self.webserver`` (serving the test pages) before each
    test method runs.
    """

    def _before(self):
        # Skip on platforms where native events are not supported.
        if self.driver.capabilities['browserName'] == 'firefox' and sys.platform == 'darwin':
            pytest.skip("native events not supported on Mac for Firefox")

    def performDragAndDropWithMouse(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        #self._before()
        self._loadPage("draggableLists")
        dragReporter = self.driver.find_element_by_id("dragging_reports")
        toDrag = self.driver.find_element_by_id("rightitem-3")
        dragInto = self.driver.find_element_by_id("sortable1")
        holdItem = ActionChains(self.driver).click_and_hold(toDrag)
        moveToSpecificItem = ActionChains(self.driver) \
            .move_to_element(self.driver.find_element_by_id("leftitem-4"))
        moveToOtherList = ActionChains(self.driver).move_to_element(dragInto)
        drop = ActionChains(self.driver).release(dragInto)
        self.assertEqual("Nothing happened.", dragReporter.text)
        holdItem.perform()
        moveToSpecificItem.perform()
        moveToOtherList.perform()
        self.assertEqual("Nothing happened. DragOut", dragReporter.text)
        drop.perform()

    def testDraggingElementWithMouseMovesItToAnotherList(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self.performDragAndDropWithMouse()
        dragInto = self.driver.find_element_by_id("sortable1")
        self.assertEqual(6, len(dragInto.find_elements_by_tag_name("li")))

    def _testDraggingElementWithMouseFiresEvents(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface.
        Disabled since this test doesn't work with HTMLUNIT.
        """
        self.performDragAndDropWithMouse()
        dragReporter = self.driver.find_element_by_id("dragging_reports")
        self.assertEqual("Nothing happened. DragOut DropIn RightItem 3", dragReporter.text)

    def _isElementAvailable(self, id):
        """Returns whether the element with the given id can be found.

        Copied from org.openqa.selenium.interactions.TestBasicMouseInterface.
        """
        try:
            self.driver.find_element_by_id(id)
            return True
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # not swallowed while polling for the element.
        except Exception:
            return False

    def testDragAndDrop(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        element_available_timeout = 15
        # Note: the wait polls this test object itself (via
        # _isElementAvailable), not the driver.
        wait = WebDriverWait(self, element_available_timeout)
        self._loadPage("droppableItems")
        wait.until(lambda dr: dr._isElementAvailable("draggable"))
        if not self._isElementAvailable("draggable"):
            # Was 'raise "<string>"', which is a TypeError at runtime and
            # would never have surfaced the intended message.
            raise RuntimeError(
                "Could not find draggable element after 15 seconds.")
        toDrag = self.driver.find_element_by_id("draggable")
        dropInto = self.driver.find_element_by_id("droppable")
        holdDrag = ActionChains(self.driver) \
            .click_and_hold(toDrag)
        move = ActionChains(self.driver) \
            .move_to_element(dropInto)
        drop = ActionChains(self.driver).release(dropInto)
        holdDrag.perform()
        move.perform()
        drop.perform()
        dropInto = self.driver.find_element_by_id("droppable")
        text = dropInto.find_element_by_tag_name("p").text
        self.assertEqual("Dropped!", text)

    def testDoubleClick(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        #pytest.skip("doubleClick is failing server-side")
        self._loadPage("javascriptPage")
        toDoubleClick = self.driver.find_element_by_id("doubleClickField")
        dblClick = ActionChains(self.driver) \
            .double_click(toDoubleClick)
        dblClick.perform()
        self.assertEqual("DoubleClicked", toDoubleClick.get_attribute('value'))

    def testContextClick(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("javascriptPage")
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs driver has an issue here")
        toContextClick = self.driver.find_element_by_id("doubleClickField")
        contextClick = ActionChains(self.driver) \
            .context_click(toContextClick)
        contextClick.perform()
        self.assertEqual("ContextClicked",
                         toContextClick.get_attribute('value'))

    def testMoveAndClick(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("javascriptPage")
        toClick = self.driver.find_element_by_id("clickField")
        click = ActionChains(self.driver) \
            .move_to_element(toClick) \
            .click()
        click.perform()
        self.assertEqual("Clicked", toClick.get_attribute('value'))

    @pytest.mark.ignore_chrome
    def testCannotMoveToANullLocator(self):
        """Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
        self._loadPage("javascriptPage")
        try:
            move = ActionChains(self.driver) \
                .move_to_element(None)
            move.perform()
            self.fail("Shouldn't be allowed to click on null element.")
        except AttributeError:
            pass  # Expected.

    def _testClickingOnFormElements(self):
        """Copied from org.openqa.selenium.interactions.CombinedInputActionsTest.
        Disabled since this test doesn't work with HTMLUNIT.
        """
        self._loadPage("formSelectionPage")
        options = self.driver.find_elements_by_tag_name("option")
        selectThreeOptions = ActionChains(self.driver) \
            .click(options[1]) \
            .key_down(Keys.SHIFT) \
            .click(options[2]) \
            .click(options[3]) \
            .key_up(Keys.SHIFT)
        selectThreeOptions.perform()
        showButton = self.driver.find_element_by_name("showselected")
        showButton.click()
        resultElement = self.driver.find_element_by_id("result")
        self.assertEqual("roquefort parmigiano cheddar", resultElement.text)

    @pytest.mark.ignore_chrome
    def testSelectingMultipleItems(self):
        """Copied from org.openqa.selenium.interactions.CombinedInputActionsTest."""
        self._loadPage("selectableItems")
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs driver does not seem to select all the elements")
        reportingElement = self.driver.find_element_by_id("infodiv")
        self.assertEqual("no info", reportingElement.text)
        listItems = self.driver.find_elements_by_tag_name("li")
        selectThreeItems = ActionChains(self.driver) \
            .key_down(Keys.CONTROL) \
            .click(listItems[1]) \
            .click(listItems[3]) \
            .click(listItems[5]) \
            .key_up(Keys.CONTROL)
        selectThreeItems.perform()
        self.assertEqual("#item2 #item4 #item6", reportingElement.text)
        # Now click on another element, make sure that's the only one selected.
        actionsBuilder = ActionChains(self.driver)
        actionsBuilder.click(listItems[6]).perform()
        self.assertEqual("#item7", reportingElement.text)

    @pytest.mark.ignore_chrome
    def testMovingMouseBackAndForthPastViewPort(self):
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs driver does not seem to trigger the events")
        self._before()
        self._loadPage("veryLargeCanvas")
        firstTarget = self.driver.find_element_by_id("r1")
        ActionChains(self.driver) \
            .move_to_element(firstTarget) \
            .click() \
            .perform()
        resultArea = self.driver.find_element_by_id("result")
        expectedEvents = "First"
        wait = WebDriverWait(resultArea, 15)
        # The lambda closes over 'expectedEvents', so rebinding that name
        # below updates the condition the wait polls for.
        expectedEventsFired = lambda e: e.text == expectedEvents
        wait.until(expectedEventsFired)
        # Move to element with id 'r2', at (2500, 50) to (2580, 100).
        ActionChains(self.driver) \
            .move_by_offset(2540 - 150, 75 - 125) \
            .click() \
            .perform()
        expectedEvents += " Second"
        wait.until(expectedEventsFired)
        # Move to element with id 'r3' at (60, 1500) to (140, 1550).
        ActionChains(self.driver) \
            .move_by_offset(100 - 2540, 1525 - 75) \
            .click() \
            .perform()
        expectedEvents += " Third"
        wait.until(expectedEventsFired)
        # Move to element with id 'r4' at (220,180) to (320, 230).
        ActionChains(self.driver) \
            .move_by_offset(270 - 100, 205 - 1525) \
            .click() \
            .perform()
        expectedEvents += " Fourth"
        wait.until(expectedEventsFired)

    def testSendingKeysToActiveElementWithModifier(self):
        self._loadPage("formPage")
        e = self.driver.find_element_by_id("working")
        e.click()
        ActionChains(self.driver) \
            .key_down(Keys.SHIFT) \
            .send_keys("abc")\
            .key_up(Keys.SHIFT)\
            .perform()
        self.assertEqual("ABC", e.get_attribute('value'))

    def _pageURL(self, name):
        # Resolve a page name to a URL on the harness-provided web server.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| apache-2.0 |
chamikaramj/incubator-beam | sdks/python/apache_beam/io/gcp/pubsub.py | 8 | 3406 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud PubSub sources and sinks.
Cloud Pub/Sub sources and sinks are currently supported only in streaming
pipelines, during remote execution.
"""
from __future__ import absolute_import
from apache_beam import coders
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms.display import DisplayDataItem
class PubSubSource(dataflow_io.NativeSource):
  """Native source that reads messages from a Cloud Pub/Sub topic.

  Only usable in streaming pipelines during remote execution; a local
  reader is deliberately not implemented.

  Attributes:
    topic: Cloud Pub/Sub topic in the form "/topics/<project>/<topic>".
    subscription: Optional existing Cloud Pub/Sub subscription to use in the
      form "projects/<project>/subscriptions/<subscription>".
    id_label: The attribute on incoming Pub/Sub messages to use as a unique
      record identifier. When specified, the value of this attribute (which
      can be any string that uniquely identifies the record) will be used for
      deduplication of messages. If not provided, Dataflow cannot guarantee
      that no duplicate data will be delivered on the Pub/Sub stream. In this
      case, deduplication of the stream will be strictly best effort.
    coder: The Coder to use for decoding incoming Pub/Sub messages.
  """

  def __init__(self, topic, subscription=None, id_label=None,
               coder=coders.StrUtf8Coder()):
    self.topic = topic
    self.subscription = subscription
    self.id_label = id_label
    self.coder = coder

  @property
  def format(self):
    """Source format name required for remote execution."""
    return 'pubsub'

  def display_data(self):
    # Assemble the display-data mapping incrementally; optional entries are
    # dropped via drop_if_none() when their value is unset.
    items = {}
    items['id_label'] = DisplayDataItem(
        self.id_label, label='ID Label Attribute').drop_if_none()
    items['topic'] = DisplayDataItem(self.topic, label='Pubsub Topic')
    items['subscription'] = DisplayDataItem(
        self.subscription, label='Pubsub Subscription').drop_if_none()
    return items

  def reader(self):
    raise NotImplementedError(
        'PubSubSource is not supported in local execution.')
class PubSubSink(dataflow_io.NativeSink):
  """Native sink that publishes messages to a Cloud Pub/Sub topic.

  Like PubSubSource, this only works in streaming pipelines during remote
  execution; there is no local writer implementation.
  """

  def __init__(self, topic, coder=coders.StrUtf8Coder()):
    self.topic = topic
    self.coder = coder

  @property
  def format(self):
    """Sink format name required for remote execution."""
    return 'pubsub'

  def display_data(self):
    topic_item = DisplayDataItem(self.topic, label='Pubsub Topic')
    return {'topic': topic_item}

  def writer(self):
    raise NotImplementedError(
        'PubSubSink is not supported in local execution.')
| apache-2.0 |
CPFL/gxen | tools/xm-test/lib/XmTestReport/ResultReport.py | 42 | 3981 | #!/usr/bin/python
"""
ResultReport.py - Handles the gathering and xml-formatting of xm-test
results
Copyright (C) International Business Machines Corp., 2005
Author: Dan Smith <danms@us.ibm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; under version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import utils
import re
class Test:
def __init__(self, name, state, seq):
self.vars = {}
self.vars["name"] = name
self.vars["state"] = state
self.vars["log"] = "NO LOG SUPPLIED"
self.vars["seq"] = str(seq)
def setLog(self, log):
self.vars["log"] = log
def __str__(self):
string = "<test>\n"
for k in self.vars.keys():
string += " " + utils.tagify(k, self.vars[k]) + "\n"
string += "</test>\n"
return string
class TestGroup:
    """A named collection of Test objects, rendered as a <group> element."""

    def __init__(self, name):
        self.name = name
        self.tests = []

    def addTest(self, test):
        """Appends a Test (anything str()-able) to this group."""
        self.tests.append(test)

    def __str__(self):
        pieces = ["<group>\n", " <name>%s</name>\n" % self.name]
        pieces.extend(str(entry) for entry in self.tests)
        pieces.append("</group>\n")
        return "".join(pieces)
class ResultSet:
    """The full set of results: a list of TestGroups inside <results>."""

    def __init__(self):
        self.groups = []

    def addGroup(self, group):
        """Appends a TestGroup (anything str()-able) to the set."""
        self.groups.append(group)

    def __str__(self):
        body = "".join(str(grp) for grp in self.groups)
        return "<results>\n" + body + "</results>\n"
class ResultParser:
    """Parses 'make check'-style xm-test output into a ResultSet.

    Lines of the form 'PASS: <num>_<group>_<name>.test' terminate a test;
    all "important" lines seen since the previous terminator become that
    test's log.
    """

    def __init__(self):
        # Map of group name -> TestGroup, filled in by parse().
        self.groups = {}
        self.resultSet = None

    def __isImportant(self, line):
        """Returns False for build/summary noise that should not be kept
        in a test's captured log."""
        if re.search("^[Mm]ak(e|ing)", line):
            return False
        if re.search("^===", line):
            return False
        if re.search("^All [0-9]+ tests", line):
            return False
        if re.search("^[0-9]+ of [0-9]+ tests", line):
            return False
        if re.search("^cp [0-9]+_", line):
            return False
        if re.search("^chmod \+x [0-9]+_", line):
            return False
        return True

    def parse(self, fileName):
        """Parses the output file and returns the resulting ResultSet."""
        # BUGFIX: the original used the Python-2-only file() builtin and
        # never closed the handle; 'with open' closes it even on error.
        with open(fileName) as output:
            contents = output.read()
        lines = contents.split("\n")

        sequence = 0
        currentLog = ""

        for l in lines:
            match = re.match("^(PASS|FAIL|XPASS|XFAIL|SKIP): ([0-9]+)_([^_]+)_([^\.]+)\.test$", l)
            if match:
                # End of current test; build object
                testStatus = match.group(1)
                testNumber = match.group(2)
                testGroup = match.group(3)
                testName = match.group(4)

                if testGroup not in self.groups:
                    self.groups[testGroup] = TestGroup(testGroup)

                test = Test("%s_%s" % (testNumber, testName), testStatus,
                            sequence)
                sequence += 1
                test.setLog(currentLog)
                self.groups[testGroup].addTest(test)
                currentLog = ""
            else:
                if self.__isImportant(l):
                    currentLog += l + "\n"

        self.resultSet = ResultSet()
        for g in self.groups:
            self.resultSet.addGroup(self.groups[g])

        return self.resultSet
if __name__ == "__main__":
    # Simple CLI: parse the 'make check' output file named on the command
    # line and dump the resulting XML report to stdout.
    import sys
    r = ResultParser()
    print str(r.parse(sys.argv[1]))
| gpl-2.0 |
bright-sparks/chromium-spacewalk | tools/perf/measurements/media.py | 6 | 2658 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import cpu
from metrics import media
from metrics import system_memory
from metrics import power
from telemetry.page import page_test
class Media(page_test.PageTest):
  """The MediaMeasurement class gathers media-related metrics on a page set.

  Media metrics recorded are controlled by metrics/media.js. At the end of the
  test each metric for every media element in the page are reported.
  """
  def __init__(self):
    super(Media, self).__init__('RunMediaMetrics')
    # Created fresh for every page in DidNavigateToPage.
    self._media_metric = None
    # Used to add browser power and CPU metrics to results per test.
    self._add_browser_metrics = False
    self._cpu_metric = None
    self._memory_metric = None
    self._power_metric = None

  def WillStartBrowser(self, browser):
    # The power metric needs the browser handle before navigation starts.
    self._power_metric = power.PowerMetric(browser)

  def CustomizeBrowserOptions(self, options):
    # Needed to run media actions in JS on touch-based devices as on Android.
    options.AppendExtraBrowserArgs(
        '--disable-gesture-requirement-for-media-playback')
    power.PowerMetric.CustomizeBrowserOptions(options)

  def DidNavigateToPage(self, page, tab):
    """Override to do operations right after the page is navigated."""
    self._media_metric = media.MediaMetric(tab)
    self._media_metric.Start(page, tab)
    # Reset to false for every page.  Pages opt in to browser-level metrics
    # via an 'add_browser_metrics' attribute.
    self._add_browser_metrics = (page.add_browser_metrics
                                 if hasattr(page, 'add_browser_metrics') else False)
    if self._add_browser_metrics:
      self._cpu_metric = cpu.CpuMetric(tab.browser)
      self._cpu_metric.Start(page, tab)
      self._memory_metric = system_memory.SystemMemoryMetric(tab.browser)
      self._memory_metric.Start(page, tab)
      self._power_metric.Start(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    """Measure the page's performance."""
    # Stop the media metric first; its trace name scopes the browser metrics
    # added below so they are grouped with this page's media results.
    self._media_metric.Stop(page, tab)
    trace_name = self._media_metric.AddResults(tab, results)
    if self._add_browser_metrics:
      self._cpu_metric.Stop(page, tab)
      self._memory_metric.Stop(page, tab)
      self._power_metric.Stop(page, tab)
      self._cpu_metric.AddResults(tab, results, trace_name=trace_name)
      # These memory counters are noisy/irrelevant for media runs, so they
      # are filtered out of the reported results.
      exclude_metrics = ['WorkingSetSizePeak', 'SystemCommitCharge', 'VMPeak',
                         'VM']
      self._memory_metric.AddResults(tab, results,
                                     trace_name=trace_name,
                                     exclude_metrics=exclude_metrics)
      self._power_metric.AddResults(tab, results)
| bsd-3-clause |
andrewklau/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.40/filter_plugins/openshift_node.py | 47 | 1796 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-node
'''
from ansible import errors
class FilterModule(object):
    ''' Custom ansible filters for use by openshift_node role'''

    @staticmethod
    def get_dns_ip(openshift_dns_ip, hostvars):
        ''' Navigates the complicated logic of when to set dnsIP

            In all situations if they've set openshift_dns_ip use that
            For 1.0/3.0 installs we use the openshift_master_cluster_vip, openshift_node_first_master_ip, else None
            For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip)
            For 1.2/3.2+ installs we set to the node's default interface ip
        '''

        if not issubclass(type(hostvars), dict):
            raise errors.AnsibleFilterError("|failed expects hostvars is a dict")

        # We always use what they've specified if they've specified a value.
        # Bug fix: compare with "is not None" rather than "!= None" so that
        # objects overriding __eq__ cannot change the outcome.
        if openshift_dns_ip is not None:
            return openshift_dns_ip

        if bool(hostvars['openshift']['common']['use_dnsmasq']):
            # 1.2/3.2+: the node's default interface address.
            return hostvars['ansible_default_ipv4']['address']
        elif bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):
            # 1.1/3.1: cluster VIP if set, otherwise the kube service ip.
            if 'openshift_master_cluster_vip' in hostvars:
                return hostvars['openshift_master_cluster_vip']
        else:
            # 1.0/3.0: cluster VIP, falling back to the first master's ip.
            if 'openshift_master_cluster_vip' in hostvars:
                return hostvars['openshift_master_cluster_vip']
            elif 'openshift_node_first_master_ip' in hostvars:
                return hostvars['openshift_node_first_master_ip']
        return None

    def filters(self):
        ''' returns a mapping of filters to methods '''
        return {'get_dns_ip': self.get_dns_ip}
| apache-2.0 |
drnextgis/QGIS | tests/src/python/test_qgsdoccoverage.py | 6 | 4437 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for API documentation coverage.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '01/02/2015'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
from qgis.testing import unittest
from termcolor import colored
from utilities import DoxygenParser
from acceptable_missing_doc import ACCEPTABLE_MISSING_DOCS, ACCEPTABLE_MISSING_ADDED_NOTE, ACCEPTABLE_MISSING_BRIEF
# TO regenerate the list:
# uncomment the lines under the `# GEN LIST`
# $ export PYTHONPATH=build/output/python
# $ export QGIS_PREFIX_PATH=build/output
# $ python tests/src/python/test_qgsdoccoverage.py
# copy the output to the file:
# tests/src/python/acceptable_missing_doc.py
# in `ACCEPTABLE_MISSING_DOCS = { <past> }`.
class TestQgsDocCoverage(unittest.TestCase):
    """Fails the build when the C++ API documentation regresses."""

    def testCoverage(self):
        """Parse the doxygen XML output and assert full documentation coverage."""
        # Tell CTest to keep this test's full console output.
        print('CTEST_FULL_OUTPUT')
        prefixPath = os.environ['QGIS_PREFIX_PATH']
        docPath = os.path.join(prefixPath, '..', 'doc', 'api', 'xml')
        # The parser tallies documented/undocumented members, honoring the
        # whitelists imported from acceptable_missing_doc.
        parser = DoxygenParser(docPath, ACCEPTABLE_MISSING_DOCS, ACCEPTABLE_MISSING_ADDED_NOTE, ACCEPTABLE_MISSING_BRIEF)

        # NOTE(review): raises ZeroDivisionError if documentable_members is 0
        # (e.g. missing/empty doxygen XML folder) -- confirm the parser
        # guarantees at least one documentable member.
        coverage = 100.0 * parser.documented_members / parser.documentable_members
        missing = parser.documentable_members - parser.documented_members

        print("---------------------------------")
        print(("{} total documentable members".format(parser.documentable_members)))
        print(("{} total contain valid documentation".format(parser.documented_members)))
        print(("Total documentation coverage {}%".format(coverage)))
        print("---------------------------------")
        print(("{} members missing documentation".format(missing)))
        print("---------------------------------")
        print("Unacceptable missing documentation:")

        # Report each class with undocumented members, highlighted in yellow.
        if parser.undocumented_members:
            for cls, props in list(parser.undocumented_members.items()):
                print(('\n\nClass {}, {}/{} members documented\n'.format(colored(cls, 'yellow'), props['documented'], props['members'])))
                for mem in props['missing_members']:
                    print((colored(' ' + mem, 'yellow', attrs=['bold'])))
            # self.assertEquals(len(parser.undocumented_string), 0, 'FAIL: new undocumented members have been introduced, please add documentation for these members')

        # Classes missing the "\ingroup" doxygen tag.
        if parser.classes_missing_group:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without Doxygen group tag ("\ingroup"):'.format(len(parser.classes_missing_group)), 'yellow')))
            print('')
            print((' ' + '\n '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_group])))

        # Classes missing the "@note added in QGIS x.xx" version note.
        if parser.classes_missing_version_added:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without a version added doxygen note ("@note added in QGIS x.xx"):'.format(len(parser.classes_missing_version_added)), 'yellow')))
            print('')
            print((' ' + '\n '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_version_added])))

        # Classes missing at least a brief description.
        if parser.classes_missing_brief:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without at least a brief description:'.format(len(parser.classes_missing_brief)), 'yellow')))
            print('')
            print((' ' + '\n '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_brief])))

        # Flush so the report above is visible before any assertion failure.
        sys.stdout.flush()

        self.assertTrue(not parser.undocumented_members, 'Undocumented members found')
        self.assertTrue(not parser.classes_missing_group, 'Classes without \group tag found')
        self.assertTrue(not parser.classes_missing_version_added, 'Classes without version added note found')
        self.assertTrue(not parser.classes_missing_brief, 'Classes without brief description found')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
coiax/-tg-station | SQL/ban_conversion_2018-10-28.py | 51 | 9890 | #Python 3+ Script for converting ban table format as of 2018-10-28 made by Jordie0608
#
#Before starting ensure you have installed the mysqlclient package https://github.com/PyMySQL/mysqlclient-python
#It can be downloaded from command line with pip:
#pip install mysqlclient
#
#You will also have to create a new ban table for inserting converted data to per the schema:
#CREATE TABLE `ban` (
# `id` INT(11) UNSIGNED NOT NULL AUTO_INCREMENT,
# `bantime` DATETIME NOT NULL,
# `server_ip` INT(10) UNSIGNED NOT NULL,
# `server_port` SMALLINT(5) UNSIGNED NOT NULL,
# `round_id` INT(11) UNSIGNED NOT NULL,
# `role` VARCHAR(32) NULL DEFAULT NULL,
# `expiration_time` DATETIME NULL DEFAULT NULL,
# `applies_to_admins` TINYINT(1) UNSIGNED NOT NULL DEFAULT '0',
# `reason` VARCHAR(2048) NOT NULL,
# `ckey` VARCHAR(32) NULL DEFAULT NULL,
# `ip` INT(10) UNSIGNED NULL DEFAULT NULL,
# `computerid` VARCHAR(32) NULL DEFAULT NULL,
# `a_ckey` VARCHAR(32) NOT NULL,
# `a_ip` INT(10) UNSIGNED NOT NULL,
# `a_computerid` VARCHAR(32) NOT NULL,
# `who` VARCHAR(2048) NOT NULL,
# `adminwho` VARCHAR(2048) NOT NULL,
# `edits` TEXT NULL DEFAULT NULL,
# `unbanned_datetime` DATETIME NULL DEFAULT NULL,
# `unbanned_ckey` VARCHAR(32) NULL DEFAULT NULL,
# `unbanned_ip` INT(10) UNSIGNED NULL DEFAULT NULL,
# `unbanned_computerid` VARCHAR(32) NULL DEFAULT NULL,
# `unbanned_round_id` INT(11) UNSIGNED NULL DEFAULT NULL,
# PRIMARY KEY (`id`),
# KEY `idx_ban_isbanned` (`ckey`,`role`,`unbanned_datetime`,`expiration_time`),
# KEY `idx_ban_isbanned_details` (`ckey`,`ip`,`computerid`,`role`,`unbanned_datetime`,`expiration_time`),
# KEY `idx_ban_count` (`bantime`,`a_ckey`,`applies_to_admins`,`unbanned_datetime`,`expiration_time`)
#) ENGINE=InnoDB DEFAULT CHARSET=latin1;
#This is to prevent the destruction of existing data and allow rollbacks to be performed in the event of an error during conversion
#Once conversion is complete remember to rename the old and new ban tables; it's up to you if you want to keep the old table
#
#To view the parameters for this script, execute it with the argument --help
#All the positional arguments are required, remember to include prefixes in your table names if you use them
#An example of the command used to execute this script from powershell:
#python ban_conversion_2018-10-28.py "localhost" "root" "password" "feedback" "SS13_ban" "SS13_ban_new"
#I found that this script would complete conversion of 35000 rows in approximately 20 seconds, results will depend on the size of your ban table and computer used
#
#The script has been tested to complete with tgstation's ban table as of 2018-09-02 02:19:56
#In the event of an error the new ban table is automatically truncated
#The source table is never modified so you don't have to worry about losing any data due to errors
#Some additional error correction is performed to fix problems specific to legacy and invalid data in tgstation's ban table, these operations are tagged with a 'TG:' comment
#Even if you don't have any of these specific problems in your ban table the operations won't have matter as they have an insignificant effect on runtime
#
#While this script is safe to run with your game server(s) active, any bans created after the script has started won't be converted
#You will also have to ensure that the code and table names are updated between rounds as neither will be compatible
import MySQLdb
import argparse
import sys
from datetime import datetime
# TG: Some legacy jobbans are missing the last character from their job
# string; map each truncated spelling back to the full job name.
# Hoisted to module level so the tables are built once instead of per row.
JOB_NAME_FIXES = {"A":"AI", "Captai":"Captain", "Cargo Technicia":"Cargo Technician", "Chaplai":"Chaplain", "Che":"Chef", "Chemis":"Chemist", "Chief Enginee":"Chief Engineer", "Chief Medical Office":"Chief Medical Officer", "Cybor":"Cyborg", "Detectiv":"Detective", "Head of Personne":"Head of Personnel", "Head of Securit":"Head of Security", "Mim":"Mime", "pA":"pAI", "Quartermaste":"Quartermaster", "Research Directo":"Research Director", "Scientis":"Scientist", "Security Office":"Security Officer", "Station Enginee":"Station Engineer", "Syndicat":"Syndicate", "Warde":"Warden"}

# Job names whose canonical capitalisation differs from str.title().
KEEP_JOB_NAMES = ("AI", "Head of Personnel", "Head of Security", "OOC", "pAI")

# Legacy bantypes that target the whole server rather than a single job.
SERVER_BAN_TYPES = ("PERMABAN", "TEMPBAN", "ADMIN_PERMABAN", "ADMIN_TEMPBAN")

def parse_role(bantype, job):
	"""Return the value for the new "role" column.

	bantype -- legacy bantype string (e.g. "PERMABAN", "JOB_TEMPBAN")
	job     -- legacy job string; only meaningful for job bans
	"""
	if bantype in SERVER_BAN_TYPES:
		return "Server"
	# Repair truncated legacy job names first.
	if job in JOB_NAME_FIXES:
		return JOB_NAME_FIXES[job]
	# Some job names we want to keep the same as .title() would return a
	# different string.
	if job in KEEP_JOB_NAMES:
		return job
	# And then there's this asshole.
	if job == "servant of Ratvar":
		return "Servant of Ratvar"
	return job.title()
def parse_admin(bantype):
	"""Return 1 for admin bantypes, else 0 (the applies_to_admins tinyint)."""
	return 1 if bantype in ("ADMIN_PERMABAN", "ADMIN_TEMPBAN") else 0
def parse_datetime(bantype, expiration_time):
	"""Normalise an expiration timestamp; permanent bans become None."""
	if bantype in ("PERMABAN", "JOB_PERMABAN", "ADMIN_PERMABAN"):
		return None
	# TG: two bans with an invalid expiration_time due to admins setting the
	# duration to approx. 19 billion years; count them as permabans.
	if expiration_time == "0000-00-00 00:00:00":
		return None
	if not expiration_time:
		return None
	return expiration_time
def parse_not_null(field):
	"""Coerce falsy values to 0 for NOT NULL columns in the new schema."""
	return field if field else 0
def parse_for_empty(field):
	"""Map falsy values and the legacy blank-ckey marker to None."""
	# TG: Several bans from 2012, probably from clients disconnecting while
	# a ban was being made, carry the "BLANK CKEY ERROR" placeholder.
	if not field or field == "BLANK CKEY ERROR":
		return None
	return field
# The script relies on Python 3 semantics; refuse to run under Python 2.
if sys.version_info[0] < 3:
	raise Exception("Python must be at least version 3 for this script.")

current_round = 0

# All positional command line arguments are required.
parser = argparse.ArgumentParser()
parser.add_argument("address", help="MySQL server address (use localhost for the current computer)")
parser.add_argument("username", help="MySQL login username")
# Bug fix: the help text for "password" was a copy/paste of the username help.
parser.add_argument("password", help="MySQL login password")
parser.add_argument("database", help="Database name")
parser.add_argument("curtable", help="Name of the current ban table (remember prefixes if you use them)")
parser.add_argument("newtable", help="Name of the new table to insert to, can't be same as the source table (remember prefixes)")
args = parser.parse_args()

db=MySQLdb.connect(host=args.address, user=args.username, passwd=args.password, db=args.database)
cursor=db.cursor()

current_table = args.curtable
new_table = args.newtable

#TG: Due to deleted rows and a legacy ban import being inserted from id 3140 id order is not contiguous or in line with date order. While technically valid, it's confusing and I don't like that.
#TG: So instead of just running through to MAX(id) we're going to reorder the records by bantime as we go.
cursor.execute("SELECT id FROM " + current_table + " ORDER BY bantime ASC")
id_list = cursor.fetchall()

start_time = datetime.now()
print("Beginning conversion at {0}".format(start_time.strftime("%Y-%m-%d %H:%M:%S")))
try:
	for current_id in id_list:
		# Progress report every 5000 rows.
		if current_id[0] % 5000 == 0:
			cur_time = datetime.now()
			print("Reached row ID {0} Duration: {1}".format(current_id[0], cur_time - start_time))
		cursor.execute("SELECT * FROM " + current_table + " WHERE id = %s", [current_id[0]])
		query_row = cursor.fetchone()
		if not query_row:
			continue
		else:
			#TG: bans with an empty reason which were somehow created with almost every field being null or empty, we can't do much but skip this
			if not query_row[6]:
				continue
			# Translate each legacy column into the new schema.
			bantime = query_row[1]
			server_ip = query_row[2]
			server_port = query_row[3]
			round_id = query_row[4]
			applies_to_admins = parse_admin(query_row[5])
			reason = query_row[6]
			role = parse_role(query_row[5], query_row[7])
			expiration_time = parse_datetime(query_row[5], query_row[9])
			ckey = parse_for_empty(query_row[10])
			computerid = parse_for_empty(query_row[11])
			ip = parse_for_empty(query_row[12])
			a_ckey = parse_not_null(query_row[13])
			a_computerid = parse_not_null(query_row[14])
			a_ip = parse_not_null(query_row[15])
			who = query_row[16]
			adminwho = query_row[17]
			edits = parse_for_empty(query_row[18])
			unbanned_datetime = parse_datetime(None, query_row[20])
			unbanned_ckey = parse_for_empty(query_row[21])
			unbanned_computerid = parse_for_empty(query_row[22])
			unbanned_ip = parse_for_empty(query_row[23])
			cursor.execute("INSERT INTO " + new_table + " (bantime, server_ip, server_port, round_id, role, expiration_time, applies_to_admins, reason, ckey, ip, computerid, a_ckey, a_ip, a_computerid, who, adminwho, edits, unbanned_datetime, unbanned_ckey, unbanned_ip, unbanned_computerid) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", (bantime, server_ip, server_port, round_id, role, expiration_time, applies_to_admins, reason, ckey, ip, computerid, a_ckey, a_ip, a_computerid, who, adminwho, edits, unbanned_datetime, unbanned_ckey, unbanned_ip, unbanned_computerid))
	db.commit()
	end_time = datetime.now()
	print("Conversion completed at {0}".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
	print("Script duration: {0}".format(end_time - start_time))
except Exception as e:
	end_time = datetime.now()
	print("Error encountered on row ID {0} at {1}".format(current_id[0], datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
	print("Script duration: {0}".format(end_time - start_time))
	# Roll back by truncating the destination table; the source is untouched.
	cursor.execute("TRUNCATE {0} ".format(new_table))
	raise e
cursor.close()
| agpl-3.0 |
pief/python-netsnmpagent | examples/threading_agent.py | 1 | 7250 | #!/usr/bin/env python
#
# python-netsnmpagent example agent with threading
#
# Copyright (c) 2013-2019 Pieter Hollants <pieter@hollants.com>
# Licensed under the GNU Lesser Public License (LGPL) version 3
#
#
# simple_agent.py demonstrates registering the various SNMP object types quite
# nicely but uses an inferior control flow logic: the main loop blocks in
# net-snmp's check_and_process() call until some event happens (eg. SNMP
# requests need processing). Only then will data be updated, not inbetween. And
# on the other hand, SNMP requests can not be handled while data is being
# updated, which might take longer periods of time.
#
# This example agent uses a more real life-suitable approach by outsourcing the
# data update process into a separate thread that gets woken up through an
# SIGALRM handler at an configurable interval. This does only ensure periodic
# data updates, it also makes sure that SNMP requests will always be replied to
# in time.
#
# Note that this implementation does not address possible locking issues: if
# a SNMP client's requests are processed while the data update thread is in the
# midst of refreshing the SNMP objects, the client might receive partially
# inconsistent data.
#
# Use the included script run_threading_agent.sh to test this example.
#
# Alternatively, see the comment block in the head of simple_agent.py for
# adaptable instructions how to run this example against a system-wide snmpd
# instance.
#
import sys, os, signal, time
import optparse, threading, subprocess
# Make sure we use the local copy, not a system-wide one
sys.path.insert(0, os.path.dirname(os.getcwd()))
import netsnmpagent
prgname = sys.argv[0]
# Process command line arguments
parser = optparse.OptionParser()
parser.add_option(
"-i",
"--interval",
dest="interval",
help="Set interval in seconds between data updates",
default=30
)
parser.add_option(
"-m",
"--mastersocket",
dest="mastersocket",
help="Sets the transport specification for the master agent's AgentX socket",
default="/var/run/agentx/master"
)
parser.add_option(
"-p",
"--persistencedir",
dest="persistencedir",
help="Sets the path to the persistence directory",
default="/var/lib/net-snmp"
)
(options, args) = parser.parse_args()
headerlogged = 0
def LogMsg(msg):
""" Writes a formatted log message with a timestamp to stdout. """
global headerlogged
if headerlogged == 0:
print("{0:<8} {1:<90} {2}".format(
"Time",
"MainThread",
"UpdateSNMPObjsThread"
))
print("{0:-^120}".format("-"))
headerlogged = 1
threadname = threading.currentThread().name
funcname = sys._getframe(1).f_code.co_name
if funcname == "<module>":
funcname = "Main code path"
elif funcname == "LogNetSnmpMsg":
funcname = "net-snmp code"
else:
funcname = "{0}()".format(funcname)
if threadname == "MainThread":
logmsg = "{0} {1:<112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
else:
logmsg = "{0} {1:>112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
print(logmsg)
def LogNetSnmpMsg(priority, msg):
	""" Log handler for log messages generated by net-snmp code. """
	LogMsg("[%s] %s." % (priority, msg))
# Create an instance of the netsnmpAgent class, wiring our log handler into
# the net-snmp logging machinery and pointing it at the example MIB file.
try:
	agent = netsnmpagent.netsnmpAgent(
		AgentName = "ThreadingAgent",
		MasterSocket = options.mastersocket,
		PersistenceDir = options.persistencedir,
		MIBFiles = [ os.path.abspath(os.path.dirname(sys.argv[0])) +
		 "/THREADING-MIB.txt" ],
		LogHandler = LogNetSnmpMsg,
	)
except netsnmpagent.netsnmpAgentException as e:
	print("{0}: {1}".format(prgname, e))
	sys.exit(1)

# Register the only SNMP object we server, a DisplayString
threadingString = agent.DisplayString(
	oidstr = "THREADING-MIB::threadingString",
	initval = "<No data available yet>"
)
def UpdateSNMPObjs():
	""" Function that does the actual data update. """

	global threadingString

	LogMsg("Beginning data update.")
	# NOTE(review): "data" is assigned here but never used afterwards.
	data = ""

	# Obtain the data by calling an external command. We don't use
	# subprocess.check_output() here for compatibility with Python versions
	# older than 2.7.
	LogMsg("Calling external command \"sleep 5; date\".")
	proc = subprocess.Popen(
		"sleep 5; date", shell=True, env={ "LANG": "C" },
		stdout=subprocess.PIPE, stderr=subprocess.STDOUT
	)
	# First line of combined stdout/stderr output.
	# NOTE(review): under Python 3 communicate() returns bytes, so "output"
	# is a bytes object here -- confirm threadingString.update() accepts it.
	output = proc.communicate()[0].splitlines()[0]
	rc = proc.poll()
	if rc != 0:
		LogMsg("An error occured executing the command: {0}".format(output))
		return

	msg = "Updating \"threadingString\" object with data \"{0}\"."
	LogMsg(msg.format(output))
	threadingString.update(output)

	LogMsg("Data update done, exiting thread.")
def UpdateSNMPObjsAsync():
	""" Starts UpdateSNMPObjs() in a separate thread. """

	# UpdateSNMPObjs() runs in its own thread so the main loop can keep
	# serving SNMP requests while the (slow) data update is in progress.
	# Only one update thread may run at any time, even if the configured
	# update interval is shorter than one update takes.
	if threading.active_count() != 1:
		LogMsg("Data update still active, data update interval too low?")
		return

	LogMsg("Creating thread for UpdateSNMPObjs().")
	worker = threading.Thread(target=UpdateSNMPObjs, name="UpdateSNMPObjsThread")
	worker.daemon = True
	worker.start()
# Start the agent (eg. connect to the master agent).
try:
	agent.start()
except netsnmpagent.netsnmpAgentException as e:
	LogMsg("{0}: {1}".format(prgname, e))
	sys.exit(1)

# Trigger initial data update.
LogMsg("Doing initial call to UpdateSNMPObjsAsync().")
UpdateSNMPObjsAsync()

# Install a signal handler that terminates our threading agent when CTRL-C is
# pressed or a KILL signal is received
def TermHandler(signum, frame):
	# Clearing "loop" makes the main loop below exit after its next pass.
	global loop
	loop = False
signal.signal(signal.SIGINT, TermHandler)
signal.signal(signal.SIGTERM, TermHandler)
# Define a signal handler that takes care of updating the data periodically
def AlarmHandler(signum, frame):
	""" SIGALRM handler: kicks off an asynchronous data update while the
	main loop is still supposed to run. """

	global loop, timer_triggered

	LogMsg("Got triggered by SIGALRM.")

	if loop:
		# Tell the main loop that check_and_process() returned because of
		# the timer, not because of an error.
		timer_triggered = True
		UpdateSNMPObjsAsync()

msg = "Installing SIGALRM handler triggered every {0} seconds."
msg = msg.format(options.interval)
LogMsg(msg)

# Bug fix: the handler and the interval timer were previously installed
# twice in a row; installing them once (after logging) is sufficient.
signal.signal(signal.SIGALRM, AlarmHandler)
signal.setitimer(signal.ITIMER_REAL, float(options.interval))
# The threading agent's main loop. We loop endlessly until our signal
# handler above changes the "loop" variable.
LogMsg("Now serving SNMP requests, press ^C to terminate.")

loop = True
while loop:
	# Block until something happened (signal arrived, SNMP packets processed)
	# "timer_triggered" is set by AlarmHandler to distinguish a SIGALRM
	# wakeup from a genuine processing error.
	timer_triggered = False
	res = agent.check_and_process()
	if res == -1 and not timer_triggered and loop:
		# A real error (not just our timer interrupting the call): bail out.
		loop = False
		LogMsg("Error {0} in SNMP packet processing!".format(res))
	elif loop and timer_triggered:
		LogMsg("net-snmp's check_and_process() returned due to SIGALRM (res={0}), doing another loop.".format(res))
	elif loop:
		LogMsg("net-snmp's check_and_process() returned (res={0}), doing another loop.".format(res))

LogMsg("Terminating.")
agent.shutdown()
| lgpl-3.0 |
bdh1011/wau | venv/lib/python2.7/site-packages/notebook/auth/security.py | 6 | 2762 | """
Password generation for the Notebook.
"""
import getpass
import hashlib
import random
from ipython_genutils.py3compat import cast_bytes, str_to_bytes
# Length of the salt in nr of hex chars, which implies salt_len * 4
# bits of randomness.
salt_len = 12
def passwd(passphrase=None, algorithm='sha1'):
    """Generate hashed password and salt for use in notebook configuration.

    In the notebook configuration, set `c.NotebookApp.password` to
    the generated string.

    Parameters
    ----------
    passphrase : str
        Password to hash.  If unspecified, the user is asked to input
        and verify a password.
    algorithm : str
        Hashing algorithm to use (e.g, 'sha1' or any argument supported
        by :func:`hashlib.new`).

    Returns
    -------
    hashed_passphrase : str
        Hashed password, in the format 'hash_algorithm:salt:passphrase_hash'.

    Examples
    --------
    >>> passwd('mypassword')
    'sha1:7cf3:b7d6da294ea9592a9480c8f52e63cd42cfb9dd12'

    """
    if passphrase is None:
        # Prompt interactively, allowing up to three attempts.
        for i in range(3):
            p0 = getpass.getpass('Enter password: ')
            p1 = getpass.getpass('Verify password: ')
            if p0 == p1:
                passphrase = p0
                break
            else:
                print('Passwords do not match.')
        else:
            raise ValueError('No matching passwords found. Giving up.')

    h = hashlib.new(algorithm)
    # Security fix: draw the salt from the OS CSPRNG via random.SystemRandom
    # instead of the default Mersenne Twister, whose output is predictable
    # and unsuitable for security-sensitive values.
    salt = ('%0' + str(salt_len) + 'x') % random.SystemRandom().getrandbits(4 * salt_len)
    h.update(cast_bytes(passphrase, 'utf-8') + str_to_bytes(salt, 'ascii'))

    return ':'.join((algorithm, salt, h.hexdigest()))
def passwd_check(hashed_passphrase, passphrase):
    """Verify that a given passphrase matches its hashed version.

    Parameters
    ----------
    hashed_passphrase : str
        Hashed password, in the format returned by `passwd`.
    passphrase : str
        Passphrase to validate.

    Returns
    -------
    valid : bool
        True if the passphrase matches the hash.

    Examples
    --------
    >>> from notebook.auth.security import passwd_check
    >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
    ...              'mypassword')
    True

    >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
    ...              'anotherpassword')
    False

    """
    import hmac

    try:
        algorithm, salt, pw_digest = hashed_passphrase.split(':', 2)
    except (ValueError, TypeError):
        # Not in the algorithm:salt:digest format.
        return False

    try:
        h = hashlib.new(algorithm)
    except ValueError:
        # Unknown hash algorithm name.
        return False

    if len(pw_digest) == 0:
        return False

    h.update(cast_bytes(passphrase, 'utf-8') + cast_bytes(salt, 'ascii'))

    # Security fix: use a constant-time comparison so attackers cannot
    # recover the digest byte-by-byte via response-timing differences.
    try:
        return hmac.compare_digest(h.hexdigest(), pw_digest)
    except TypeError:
        # pw_digest contained non-ASCII characters and therefore can never
        # match a hex digest.
        return False
| mit |
faywong/FFPlayer | project/jni/python/src/Lib/sre_parse.py | 61 | 26878 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
def set(seq):
    # Poor man's set for old Python versions: a dict whose keys are the
    # elements of seq (each mapped to 1); membership is tested with "in".
    return dict.fromkeys(seq, 1)
# Characters with special meaning outside character classes.
SPECIAL_CHARS = ".\\[{()*+?^$|"
# Characters that can introduce a repeat construct.
REPEAT_CHARS = "*+?{"

DIGITS = set("0123456789")

OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")

WHITESPACE = set(" \t\n\r\v\f")

# Escapes that always stand for a single literal character.
ESCAPES = {
    r"\a": (LITERAL, ord("\a")),
    r"\b": (LITERAL, ord("\b")),
    r"\f": (LITERAL, ord("\f")),
    r"\n": (LITERAL, ord("\n")),
    r"\r": (LITERAL, ord("\r")),
    r"\t": (LITERAL, ord("\t")),
    r"\v": (LITERAL, ord("\v")),
    r"\\": (LITERAL, ord("\\"))
}

# Escapes that expand to a character category or a position assertion.
CATEGORIES = {
    r"\A": (AT, AT_BEGINNING_STRING), # start of string
    r"\b": (AT, AT_BOUNDARY),
    r"\B": (AT, AT_NON_BOUNDARY),
    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
    r"\Z": (AT, AT_END_STRING), # end of string
}

# Inline flag letters (as in "(?i)") mapped to SRE flag bits.
FLAGS = {
    # standard flags
    "i": SRE_FLAG_IGNORECASE,
    "L": SRE_FLAG_LOCALE,
    "m": SRE_FLAG_MULTILINE,
    "s": SRE_FLAG_DOTALL,
    "x": SRE_FLAG_VERBOSE,
    # extensions
    "t": SRE_FLAG_TEMPLATE,
    "u": SRE_FLAG_UNICODE,
}
class Pattern:
    # master pattern object. keeps track of global attributes
    def __init__(self):
        self.flags = 0          # combined SRE_FLAG_* bits
        self.open = []          # group numbers whose ")" was not seen yet
        self.groups = 1         # next free group number (0 is the whole match)
        self.groupdict = {}     # group name -> group number
    def opengroup(self, name=None):
        # Allocate the next group number and optionally bind it to a name.
        gid = self.groups
        self.groups = gid + 1
        if name is not None:
            # Reject duplicate group names (Python 2 raise syntax).
            ogid = self.groupdict.get(name, None)
            if ogid is not None:
                raise error, ("redefinition of group name %s as group %d; "
                              "was group %d" % (repr(name), gid, ogid))
            self.groupdict[name] = gid
        self.open.append(gid)
        return gid
    def closegroup(self, gid):
        # Mark the group as closed (its ")" has been consumed).
        self.open.remove(gid)
    def checkgroup(self, gid):
        # A group may only be referenced once it is defined and fully closed.
        return gid < self.groups and gid not in self.open
class SubPattern:
    # a subpattern, in intermediate form
    def __init__(self, pattern, data=None):
        self.pattern = pattern  # the master Pattern object
        if data is None:
            data = []
        self.data = data        # list of (opcode, argument) tuples
        self.width = None       # cached (min, max) width, see getwidth()
    def dump(self, level=0):
        # Debug helper: pretty-print the parsed (opcode, argument) tree,
        # indenting by nesting level (Python 2 print statements).
        nl = 1
        seqtypes = type(()), type([])
        for op, av in self.data:
            print level*" " + op,; nl = 0
            if op == "in":
                # member sublanguage
                print; nl = 1
                for op, a in av:
                    print (level+1)*" " + op, a
            elif op == "branch":
                print; nl = 1
                i = 0
                for a in av[1]:
                    if i > 0:
                        print level*" " + "or"
                    a.dump(level+1); nl = 1
                    i = i + 1
            elif type(av) in seqtypes:
                # Recurse into nested subpatterns, print scalars inline.
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl: print
                        a.dump(level+1); nl = 1
                    else:
                        print a, ; nl = 0
            else:
                print av, ; nl = 0
            if not nl: print
    def __repr__(self):
        return repr(self.data)
    def __len__(self):
        return len(self.data)
    def __delitem__(self, index):
        del self.data[index]
    def __getitem__(self, index):
        # Slicing yields a new SubPattern sharing the master Pattern.
        if isinstance(index, slice):
            return SubPattern(self.pattern, self.data[index])
        return self.data[index]
    def __setitem__(self, index, code):
        self.data[index] = code
    def insert(self, index, code):
        self.data.insert(index, code)
    def append(self, code):
        self.data.append(code)
    def getwidth(self):
        # determine the width (min, max) for this subpattern
        if self.width:
            return self.width
        # Accumulate in Python 2 longs to avoid int overflow before clamping.
        lo = hi = 0L
        UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
        REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
        for op, av in self.data:
            if op is BRANCH:
                # A branch contributes its narrowest/widest alternative.
                i = sys.maxint
                j = 0
                for av in av[1]:
                    l, h = av.getwidth()
                    i = min(i, l)
                    j = max(j, h)
                lo = lo + i
                hi = hi + j
            elif op is CALL:
                i, j = av.getwidth()
                lo = lo + i
                hi = hi + j
            elif op is SUBPATTERN:
                i, j = av[1].getwidth()
                lo = lo + i
                hi = hi + j
            elif op in REPEATCODES:
                # Width of the repeated body times min/max repeat count.
                i, j = av[2].getwidth()
                lo = lo + long(i) * av[0]
                hi = hi + long(j) * av[1]
            elif op in UNITCODES:
                lo = lo + 1
                hi = hi + 1
            elif op == SUCCESS:
                break
        # Clamp to the platform int range and cache the result.
        self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
        return self.width
class Tokenizer:
    # Splits the pattern string into single characters and "\x" escape
    # pairs, keeping a one-token lookahead in self.next (None at the end).
    def __init__(self, string):
        self.string = string
        self.index = 0
        self.__next()
    def __next(self):
        # Advance and load the next token into self.next.
        if self.index >= len(self.string):
            self.next = None
            return
        char = self.string[self.index]
        if char[0] == "\\":
            # A backslash always consumes the following character too
            # (Python 2 raise syntax).
            try:
                c = self.string[self.index + 1]
            except IndexError:
                raise error, "bogus escape (end of line)"
            char = char + c
        self.index = self.index + len(char)
        self.next = char
    def match(self, char, skip=1):
        # Report whether the lookahead equals char; consume it when skip.
        if char == self.next:
            if skip:
                self.__next()
            return 1
        return 0
    def get(self):
        # Return the current token and advance past it.
        this = self.next
        self.__next()
        return this
    def tell(self):
        # Snapshot of the current position, for backtracking via seek().
        return self.index, self.next
    def seek(self, index):
        # Restore a position previously returned by tell().
        self.index, self.next = index
def isident(char):
    # First character of a group name: ASCII letter or underscore only.
    return char == "_" or "a" <= char <= "z" or "A" <= char <= "Z"

def isdigit(char):
    # ASCII decimal digit (unicode/locale digits intentionally excluded).
    return "0" <= char and char <= "9"

def isname(name):
    # A valid group name is identifier-shaped: letter/underscore first,
    # then letters, digits or underscores.
    if not isident(name[0]):
        return False
    return all(isident(ch) or isdigit(ch) for ch in name[1:])
def _class_escape(source, escape):
    # handle escape code inside character class
    # Returns an (opcode, argument) tuple; raises error on a bad escape
    # (Python 2 raise syntax throughout).
    code = ESCAPES.get(escape)
    if code:
        return code
    code = CATEGORIES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape (exactly two digits)
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            escape = escape[2:]
            if len(escape) != 2:
                raise error, "bogus escape: %s" % repr("\\" + escape)
            return LITERAL, int(escape, 16) & 0xff
        elif c in OCTDIGITS:
            # octal escape (up to three digits)
            while source.next in OCTDIGITS and len(escape) < 4:
                escape = escape + source.get()
            escape = escape[1:]
            return LITERAL, int(escape, 8) & 0xff
        elif c in DIGITS:
            # \8 and \9: not octal, and group references are illegal here.
            raise error, "bogus escape: %s" % repr(escape)
        if len(escape) == 2:
            # plain escaped literal, e.g. "\-"
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
    # handle escape code in expression
    # Like _class_escape, but outside a character class, where "\\1" etc.
    # can also be a back-reference to an already-closed group.
    code = CATEGORIES.get(escape)
    if code:
        return code
    code = ESCAPES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "0":
            # octal escape
            while source.next in OCTDIGITS and len(escape) < 4:
                escape = escape + source.get()
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # octal escape *or* decimal group reference (sigh)
            if source.next in DIGITS:
                escape = escape + source.get()
                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
                    source.next in OCTDIGITS):
                    # got three octal digits; this is an octal escape
                    escape = escape + source.get()
                    return LITERAL, int(escape[1:], 8) & 0xff
            # not an octal escape, so this is a group reference
            group = int(escape[1:])
            if group < state.groups:
                if not state.checkgroup(group):
                    # forward/self references into open groups are illegal
                    raise error, "cannot refer to open group"
                return GROUPREF, group
            raise ValueError
        if len(escape) == 2:
            # plain escaped literal, e.g. "\\."
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
    # parse an alternation: a|b|c
    # *nested* is false only at the top level, where a stray ")" is left
    # for the caller (parse) to report.
    items = []
    itemsappend = items.append
    sourcematch = source.match
    while 1:
        itemsappend(_parse(source, state))
        if sourcematch("|"):
            continue
        if not nested:
            break
        if not source.next or sourcematch(")", 0):
            break
        else:
            raise error, "pattern not properly closed"
    if len(items) == 1:
        # no alternation at all; return the single branch unchanged
        return items[0]
    subpattern = SubPattern(state)
    subpatternappend = subpattern.append
    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpatternappend(prefix)
            continue # check next one
        break
    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        setappend = set.append
        for item in items:
            setappend(item[0])
        subpatternappend((IN, set))
        return subpattern
    subpattern.append((BRANCH, (None, items)))
    return subpattern
def _parse_sub_cond(source, state, condgroup):
    # Parse the body of a conditional group "(?(id)yes|no)"; *condgroup*
    # is the group number/id whose match status selects the branch.
    item_yes = _parse(source, state)
    if source.match("|"):
        item_no = _parse(source, state)
        if source.match("|"):
            # at most a yes- and a no-branch are allowed
            raise error, "conditional backref with more than two branches"
    else:
        item_no = None
    if source.next and not source.match(")", 0):
        raise error, "pattern not properly closed"
    subpattern = SubPattern(state)
    subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
    return subpattern
# Character/opcode sets precomputed once at module level so that _parse can
# bind them to fast local names.
_PATTERNENDERS = set("|)")
# Characters that may follow "(?" to introduce an assertion.
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
    # parse a simple pattern
    # One alternation branch: a sequence of literals, classes, repeats,
    # groups, anchors and escapes, ending at "|", ")" or end of input.
    subpattern = SubPattern(state)
    # precompute constants into local variables
    subpatternappend = subpattern.append
    sourceget = source.get
    sourcematch = source.match
    _len = len
    PATTERNENDERS = _PATTERNENDERS
    ASSERTCHARS = _ASSERTCHARS
    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
    REPEATCODES = _REPEATCODES
    while 1:
        if source.next in PATTERNENDERS:
            break # end of subpattern
        this = sourceget()
        if this is None:
            break # end of pattern
        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                while 1:
                    this = sourceget()
                    if this in (None, "\n"):
                        break
                continue
        if this and this[0] not in SPECIAL_CHARS:
            subpatternappend((LITERAL, ord(this)))
        elif this == "[":
            # character set
            set = []
            setappend = set.append
            ## if sourcematch(":"):
            ##     pass # handle character classes
            if sourcematch("^"):
                setappend((NEGATE, None))
            # check remaining characters
            # "start" lets a "]" that is the first member be taken literally
            start = set[:]
            while 1:
                this = sourceget()
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error, "unexpected end of regular expression"
                if sourcematch("-"):
                    # potential range
                    this = sourceget()
                    if this == "]":
                        # trailing "-" is a literal, e.g. "[a-]"
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        setappend(code1)
                        setappend((LITERAL, ord("-")))
                        break
                    elif this:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error, "bad character range"
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error, "bad character range"
                        setappend((RANGE, (lo, hi)))
                    else:
                        raise error, "unexpected end of regular expression"
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    setappend(code1)
            # XXX: <fl> should move set optimization to compiler!
            if _len(set)==1 and set[0][0] is LITERAL:
                subpatternappend(set[0]) # optimization
            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpatternappend((NOT_LITERAL, set[1][1])) # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpatternappend((IN, set))
        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT
            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                if source.next == "}":
                    # "{}" is not a repeat; treat "{" as a literal
                    subpatternappend((LITERAL, ord(this)))
                    continue
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if sourcematch(","):
                    while source.next in DIGITS:
                        hi = hi + sourceget()
                else:
                    hi = lo
                if not sourcematch("}"):
                    # not a well-formed {m,n}; rewind and emit literal "{"
                    subpatternappend((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = int(lo)
                if hi:
                    max = int(hi)
                if max < min:
                    raise error, "bad repeat interval"
            else:
                raise error, "not supported"
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (_len(item) == 1 and item[0][0] == AT):
                raise error, "nothing to repeat"
            if item[0][0] in REPEATCODES:
                raise error, "multiple repeat"
            if sourcematch("?"):
                # trailing "?" makes the repeat non-greedy
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))
        elif this == ".":
            subpatternappend((ANY, None))
        elif this == "(":
            # group = 1: capturing, 2: non-capturing, 0: not a group at all
            group = 1
            name = None
            condgroup = None
            if sourcematch("?"):
                group = 0
                # options
                if sourcematch("P"):
                    # python extensions
                    if sourcematch("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error, "unterminated name"
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not isname(name):
                            raise error, "bad character in group name"
                    elif sourcematch("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error, "unterminated name"
                            if char == ")":
                                break
                            name = name + char
                        if not isname(name):
                            raise error, "bad character in group name"
                        gid = state.groupdict.get(name)
                        if gid is None:
                            raise error, "unknown group name"
                        subpatternappend((GROUPREF, gid))
                        continue
                    else:
                        char = sourceget()
                        if char is None:
                            raise error, "unexpected end of pattern"
                        raise error, "unknown specifier: ?P%s" % char
                elif sourcematch(":"):
                    # non-capturing group
                    group = 2
                elif sourcematch("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        sourceget()
                    if not sourcematch(")"):
                        raise error, "unbalanced parenthesis"
                    continue
                elif source.next in ASSERTCHARS:
                    # lookahead assertions
                    char = sourceget()
                    dir = 1
                    if char == "<":
                        if source.next not in LOOKBEHINDASSERTCHARS:
                            raise error, "syntax error"
                        dir = -1 # lookbehind
                        char = sourceget()
                    p = _parse_sub(source, state)
                    if not sourcematch(")"):
                        raise error, "unbalanced parenthesis"
                    if char == "=":
                        subpatternappend((ASSERT, (dir, p)))
                    else:
                        subpatternappend((ASSERT_NOT, (dir, p)))
                    continue
                elif sourcematch("("):
                    # conditional backreference group
                    condname = ""
                    while 1:
                        char = sourceget()
                        if char is None:
                            raise error, "unterminated name"
                        if char == ")":
                            break
                        condname = condname + char
                    group = 2
                    if isname(condname):
                        condgroup = state.groupdict.get(condname)
                        if condgroup is None:
                            raise error, "unknown group name"
                    else:
                        try:
                            condgroup = int(condname)
                        except ValueError:
                            raise error, "bad character in group name"
                else:
                    # flags
                    if not source.next in FLAGS:
                        raise error, "unexpected end of pattern"
                    while source.next in FLAGS:
                        state.flags = state.flags | FLAGS[sourceget()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                if condgroup:
                    p = _parse_sub_cond(source, state, condgroup)
                else:
                    p = _parse_sub(source, state)
                if not sourcematch(")"):
                    raise error, "unbalanced parenthesis"
                if group is not None:
                    state.closegroup(group)
                subpatternappend((SUBPATTERN, (group, p)))
            else:
                # "(?...)" that only set flags; nothing else may follow
                while 1:
                    char = sourceget()
                    if char is None:
                        raise error, "unexpected end of pattern"
                    if char == ")":
                        break
                    raise error, "unknown extension"
        elif this == "^":
            subpatternappend((AT, AT_BEGINNING))
        elif this == "$":
            subpattern.append((AT, AT_END))
        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpatternappend(code)
        else:
            raise error, "parser error"
    return subpattern
def parse(str, flags=0, pattern=None):
    # parse 're' pattern into list of (opcode, argument) tuples
    # Top-level entry point; returns a SubPattern for the whole pattern.
    source = Tokenizer(str)
    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str
    p = _parse_sub(source, pattern, 0)
    tail = source.get()
    if tail == ")":
        raise error, "unbalanced parenthesis"
    elif tail:
        raise error, "bogus characters at end of regular expression"
    if flags & SRE_FLAG_DEBUG:
        p.dump()
    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern. to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)
    return p
def parse_template(source, pattern):
    # parse 're' replacement string into list of literals and
    # group references
    # Returns (groups, literals) where groups is a list of
    # (literal_index, group_number) pairs and literals is a list with None
    # placeholders at the positions group values will be substituted into.
    s = Tokenizer(source)
    sget = s.get
    p = []
    a = p.append
    def literal(literal, p=p, pappend=a):
        # Append a literal, merging with a preceding literal when possible.
        if p and p[-1][0] is LITERAL:
            p[-1] = LITERAL, p[-1][1] + literal
        else:
            pappend((LITERAL, literal))
    # empty slice of the input preserves its string type (str vs unicode)
    sep = source[:0]
    if type(sep) is type(""):
        makechar = chr
    else:
        makechar = unichr
    while 1:
        this = sget()
        if this is None:
            break # end of replacement string
        if this and this[0] == "\\":
            # group
            c = this[1:2]
            if c == "g":
                # \g<name> or \g<number> reference
                name = ""
                if s.match("<"):
                    while 1:
                        char = sget()
                        if char is None:
                            raise error, "unterminated group name"
                        if char == ">":
                            break
                        name = name + char
                if not name:
                    raise error, "bad group name"
                try:
                    index = int(name)
                    if index < 0:
                        raise error, "negative group number"
                except ValueError:
                    if not isname(name):
                        raise error, "bad character in group name"
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        raise IndexError, "unknown group name"
                a((MARK, index))
            elif c == "0":
                # \0oo octal character escape (up to two more octal digits)
                if s.next in OCTDIGITS:
                    this = this + sget()
                    if s.next in OCTDIGITS:
                        this = this + sget()
                literal(makechar(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                # \NN: three octal digits make a character, otherwise a
                # decimal group reference
                isoctal = False
                if s.next in DIGITS:
                    this = this + sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this = this + sget()
                        isoctal = True
                        literal(makechar(int(this[1:], 8) & 0xff))
                if not isoctal:
                    a((MARK, int(this[1:])))
            else:
                try:
                    this = makechar(ESCAPES[this][1])
                except KeyError:
                    pass
                literal(this)
        else:
            literal(this)
    # convert template to groups and literals lists
    i = 0
    groups = []
    groupsappend = groups.append
    literals = [None] * len(p)
    for c, s in p:
        if c is MARK:
            groupsappend((i, s))
            # literal[i] is already None
        else:
            literals[i] = s
        i = i + 1
    return groups, literals
def expand_template(template, match):
    # Fill a (groups, literals) template (from parse_template) with the
    # group values of *match* and join the result into one string.
    g = match.group
    # empty slice of the subject keeps the str/unicode type for the join
    sep = match.string[:0]
    groups, literals = template
    literals = literals[:]
    try:
        for index, group in groups:
            literals[index] = s = g(group)
            if s is None:
                raise error, "unmatched group"
    except IndexError:
        raise error, "invalid group reference"
    return sep.join(literals)
| lgpl-2.1 |
msmolens/VTK | ThirdParty/Twisted/twisted/protocols/pcp.py | 71 | 7090 | # -*- test-case-name: twisted.test.test_pcp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Producer-Consumer Proxy.
"""
from zope.interface import implements
from twisted.internet import interfaces
class BasicProducerConsumerProxy:
    """
    I can act as a man in the middle between any Producer and Consumer.
    @ivar producer: the Producer I subscribe to.
    @type producer: L{IProducer<interfaces.IProducer>}
    @ivar consumer: the Consumer I publish to.
    @type consumer: L{IConsumer<interfaces.IConsumer>}
    @ivar paused: As a Producer, am I paused?
    @type paused: bool
    """
    implements(interfaces.IProducer, interfaces.IConsumer)
    # Class-level defaults; `del self.attr` in the methods below exposes
    # these again on the instance.
    consumer = None
    producer = None
    producerIsStreaming = None
    iAmStreaming = True
    outstandingPull = False
    paused = False
    stopped = False
    def __init__(self, consumer):
        self._buffer = []
        if consumer is not None:
            self.consumer = consumer
            consumer.registerProducer(self, self.iAmStreaming)
    # Producer methods:
    def pauseProducing(self):
        # Relay the pause upstream so our own source stops feeding us.
        self.paused = True
        if self.producer:
            self.producer.pauseProducing()
    def resumeProducing(self):
        self.paused = False
        if self._buffer:
            # Flush everything buffered while we were paused.
            # TODO: Check to see if consumer supports writeSeq.
            self.consumer.write(''.join(self._buffer))
            self._buffer[:] = []
        else:
            if not self.iAmStreaming:
                # Pull-mode: remember that the consumer asked for data.
                self.outstandingPull = True
        if self.producer is not None:
            self.producer.resumeProducing()
    def stopProducing(self):
        if self.producer is not None:
            self.producer.stopProducing()
        if self.consumer is not None:
            del self.consumer
    # Consumer methods:
    def write(self, data):
        if self.paused or (not self.iAmStreaming and not self.outstandingPull):
            # We could use that fifo queue here.
            self._buffer.append(data)
        elif self.consumer is not None:
            self.consumer.write(data)
            self.outstandingPull = False
    def finish(self):
        if self.consumer is not None:
            self.consumer.finish()
        self.unregisterProducer()
    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producerIsStreaming = streaming
    def unregisterProducer(self):
        if self.producer is not None:
            del self.producer
            del self.producerIsStreaming
        if self.consumer:
            self.consumer.unregisterProducer()
    def __repr__(self):
        return '<%s@%x around %s>' % (self.__class__, id(self), self.consumer)
class ProducerConsumerProxy(BasicProducerConsumerProxy):
    """ProducerConsumerProxy with a finite buffer.
    When my buffer fills up, I have my parent Producer pause until my buffer
    has room in it again.
    """
    # Copies much from abstract.FileDescriptor
    bufferSize = 2**2**2**2  # 2**16 == 65536
    producerPaused = False
    unregistered = False
    def pauseProducing(self):
        # Does *not* call up to ProducerConsumerProxy to relay the pause
        # message through to my parent Producer.
        self.paused = True
    def resumeProducing(self):
        self.paused = False
        if self._buffer:
            # Try to flush the backlog first.
            data = ''.join(self._buffer)
            bytesSent = self._writeSomeData(data)
            if bytesSent < len(data):
                unsent = data[bytesSent:]
                assert not self.iAmStreaming, (
                    "Streaming producer did not write all its data.")
                self._buffer[:] = [unsent]
            else:
                self._buffer[:] = []
        else:
            bytesSent = 0
        if (self.unregistered and bytesSent and not self._buffer and
            self.consumer is not None):
            # Our producer went away while data was still pending; now that
            # it has drained, pass the unregistration downstream.
            self.consumer.unregisterProducer()
        if not self.iAmStreaming:
            self.outstandingPull = not bytesSent
        if self.producer is not None:
            bytesBuffered = sum([len(s) for s in self._buffer])
            # TODO: You can see here the potential for high and low
            # watermarks, where bufferSize would be the high mark when we
            # ask the upstream producer to pause, and we wouldn't have
            # it resume again until it hit the low mark. Or if producer
            # is Pull, maybe we'd like to pull from it as much as necessary
            # to keep our buffer full to the low mark, so we're never caught
            # without something to send.
            if self.producerPaused and (bytesBuffered < self.bufferSize):
                # Now that our buffer is empty,
                self.producerPaused = False
                self.producer.resumeProducing()
            elif self.outstandingPull:
                # I did not have any data to write in response to a pull,
                # so I'd better pull some myself.
                self.producer.resumeProducing()
    def write(self, data):
        if self.paused or (not self.iAmStreaming and not self.outstandingPull):
            # We could use that fifo queue here.
            self._buffer.append(data)
        elif self.consumer is not None:
            assert not self._buffer, (
                "Writing fresh data to consumer before my buffer is empty!")
            # I'm going to use _writeSomeData here so that there is only one
            # path to self.consumer.write. But it doesn't actually make sense,
            # if I am streaming, for some data to not be all data. But maybe I
            # am not streaming, but I am writing here anyway, because there was
            # an earlier request for data which was not answered.
            bytesSent = self._writeSomeData(data)
            self.outstandingPull = False
            if not bytesSent == len(data):
                assert not self.iAmStreaming, (
                    "Streaming producer did not write all its data.")
                self._buffer.append(data[bytesSent:])
        if (self.producer is not None) and self.producerIsStreaming:
            # Apply backpressure once the buffer reaches bufferSize.
            bytesBuffered = sum([len(s) for s in self._buffer])
            if bytesBuffered >= self.bufferSize:
                self.producer.pauseProducing()
                self.producerPaused = True
    def registerProducer(self, producer, streaming):
        self.unregistered = False
        BasicProducerConsumerProxy.registerProducer(self, producer, streaming)
        if not streaming:
            # Pull producers need an initial nudge to start producing.
            producer.resumeProducing()
    def unregisterProducer(self):
        if self.producer is not None:
            del self.producer
            del self.producerIsStreaming
        self.unregistered = True
        if self.consumer and not self._buffer:
            self.consumer.unregisterProducer()
    def _writeSomeData(self, data):
        """Write as much of this data as possible.
        @returns: The number of bytes written.
        """
        if self.consumer is None:
            return 0
        self.consumer.write(data)
        return len(data)
| bsd-3-clause |
Pluto-tv/chromium-crosswalk | native_client_sdk/src/doc/doxygen/generate_docs.py | 22 | 9794 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to regenerate API docs using doxygen.
"""
import argparse
import collections
import json
import os
import shutil
import subprocess
import sys
import tempfile
import urllib2
# argparse and subprocess.check_output (both used below) first appeared in
# Python 2.7, so refuse to run on anything older.
if sys.version_info < (2, 7, 0):
  # Fixed grammar of the user-facing message ("required run" -> "required
  # to run").
  sys.stderr.write("python 2.7 or later is required to run this script\n")
  sys.exit(1)
# Directory containing this script, and the documentation root above it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
DOC_DIR = os.path.dirname(SCRIPT_DIR)
# (branch, version) pair describing one Chrome release channel.
ChannelInfo = collections.namedtuple('ChannelInfo', ['branch', 'version'])
def Trace(msg):
  """Write *msg* to stderr, but only when Trace.verbose has been enabled."""
  if not Trace.verbose:
    return
  sys.stderr.write('%s\n' % msg)
Trace.verbose = False
def GetChannelInfo():
  """Return a dict mapping channel name to ChannelInfo(branch, version).

  Queries omahaproxy for the current Chrome release data. Only desktop
  platforms (win/mac/linux) are considered, the canary channel is skipped,
  and the first branch seen for a channel wins; a warning is printed when
  platforms disagree about a channel's branch.
  """
  url = 'http://omahaproxy.appspot.com/json'
  stream = urllib2.urlopen(url)
  try:
    data = json.loads(stream.read())
  finally:
    stream.close()
  channel_info = {}
  for os_row in data:
    if os_row['os'] not in ('win', 'mac', 'linux'):
      continue
    for version_row in os_row['versions']:
      channel = version_row['channel']
      # We don't display canary docs.
      if channel == 'canary':
        continue
      version = version_row['version'].split('.')[0]  # Major version
      branch = version_row['true_branch']
      if branch is None:
        branch = 'trunk'
      existing_info = channel_info.get(channel)
      if existing_info is None:
        channel_info[channel] = ChannelInfo(branch, version)
      elif branch != existing_info.branch:
        sys.stderr.write('Warning: found different branch numbers for '
            'channel %s: %s vs %s. Using %s.\n' % (
            channel, branch, existing_info.branch, existing_info.branch))
  return channel_info
def RemoveFile(filename):
  """Delete *filename* if it exists; silently do nothing otherwise."""
  if not os.path.exists(filename):
    return
  os.remove(filename)
def RemoveDir(dirname):
  """Recursively delete *dirname* if it exists; do nothing otherwise."""
  if not os.path.exists(dirname):
    return
  shutil.rmtree(dirname)
def HasBranchHeads():
  """Return True if the git checkout mirrors any branch-heads refs."""
  cmd = ['git', 'for-each-ref', '--format=%(refname)',
         'refs/remotes/branch-heads']
  refs = subprocess.check_output(cmd).splitlines()
  return len(refs) > 0
def CheckoutDirectories(dest_dirname, refname, root_path, patterns=None):
  """Copy files out of a git tree-ish into dest_dirname.

  Lists every blob under <refname>:<root_path> (optionally restricted to
  *patterns*) and writes each one to the same relative path below
  dest_dirname, creating intermediate directories as needed.
  """
  treeish = '%s:%s' % (refname, root_path)
  ls_tree_cmd = ['git', 'ls-tree', '--full-tree', '-r', treeish]
  if patterns:
    ls_tree_cmd.extend(patterns)
  Trace('Running \"%s\":' % ' '.join(ls_tree_cmd))
  listing = subprocess.check_output(ls_tree_cmd)
  for line in listing.splitlines():
    # Each line is "<mode> <type> <sha>\t<relative path>".
    info, rel_filename = line.split('\t')
    sha = info.split(' ')[2]
    Trace(' %s %s' % (sha, rel_filename))
    blob = subprocess.check_output(['git', 'show', sha])
    filename = os.path.join(dest_dirname, rel_filename)
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
      os.makedirs(dirname)
    Trace(' writing to %s' % filename)
    with open(filename, 'w') as f:
      f.write(blob)
def CheckoutPepperDocs(branch, doc_dirname):
  """Populate doc_dirname with a fresh checkout of the Pepper doc sources.

  *branch* is a Chrome branch number, or 'master' for trunk.
  """
  Trace('Removing directory %s' % doc_dirname)
  RemoveDir(doc_dirname)
  refname = ('refs/remotes/origin/master' if branch == 'master'
             else 'refs/remotes/branch-heads/%s' % branch)
  Trace('Checking out docs into %s' % doc_dirname)
  CheckoutDirectories(doc_dirname, refname, 'ppapi',
                      ['api', 'generators', 'cpp', 'utility'])
  # The IDL generator needs PLY (a python lexing library); check it out into
  # generators.
  ply_dirname = os.path.join(doc_dirname, 'generators', 'ply')
  Trace('Checking out PLY into %s' % ply_dirname)
  CheckoutDirectories(ply_dirname, refname, 'third_party/ply')
def FixPepperDocLinks(doc_dirname):
  """Rewrite a fixed set of known-bad hyperlinks in checked-out headers.

  TODO(binji): We can remove this step when the correct links are in the
  stable branch.
  """
  Trace('Looking for links to fix in Pepper headers...')
  # Map of bad link -> corrected link; only the first occurrence per line
  # of each bad link is rewritten.
  replacements = {
      '<a href="/native-client/{{pepperversion}}/devguide/coding/audio">':
          '<a href="/native-client/devguide/coding/audio.html">',
      '<a href="/native-client/devguide/coding/audio">':
          '<a href="/native-client/devguide/coding/audio.html">',
      '<a href="/native-client/{{pepperversion}}/pepperc/globals_defs"':
          '<a href="globals_defs.html"',
      '<a href="../pepperc/ppb__image__data_8h.html">':
          '<a href="../c/ppb__image__data_8h.html">'}
  for root, dirs, filenames in os.walk(doc_dirname):
    if '.svn' in dirs:
      # Don't recurse into .svn
      dirs.remove('.svn')
    for filename in filenames:
      header_filename = os.path.join(root, filename)
      Trace(' Checking file %r...' % header_filename)
      new_lines = []
      changed = False
      with open(header_filename) as f:
        for line in f:
          for find, replace in replacements.iteritems():
            if find in line:
              Trace(' Found %r...' % find)
              changed = True
              line = line.replace(find, replace, 1)
          new_lines.append(line)
      if changed:
        Trace(' Writing new file.')
        with open(header_filename, 'w') as f:
          f.writelines(new_lines)
def GenerateCHeaders(pepper_version, doc_dirname):
  """Run the PPAPI IDL generator to produce C headers for one version.

  The generator is run from inside <doc_dirname>/api so that the relative
  paths below resolve against the checkout.
  """
  cwd = os.path.join(doc_dirname, 'api')
  generator = os.path.join(os.pardir, 'generators', 'generator.py')
  dstroot = os.path.join(os.pardir, 'c')
  cmd = [sys.executable, generator, '--cgen', '--release',
         'M' + pepper_version, '--wnone', '--dstroot', dstroot]
  Trace('Generating C Headers for version %s\n %s' % (
      pepper_version, ' '.join(cmd)))
  subprocess.check_call(cmd, cwd=cwd)
def GenerateDoxyfile(template_filename, out_dirname, doc_dirname, doxyfile):
  """Instantiate *doxyfile* from a %-style Doxyfile template."""
  Trace('Writing Doxyfile "%s" (from template %s)' % (
      doxyfile, template_filename))
  substitutions = {
      'out_dirname': out_dirname,
      'doc_dirname': doc_dirname,
      'script_dirname': SCRIPT_DIR}
  with open(template_filename) as f:
    template = f.read()
  with open(doxyfile, 'w') as f:
    f.write(template % substitutions)
def RunDoxygen(out_dirname, doxyfile):
  """Run doxygen on *doxyfile*, writing into a freshly-created out_dirname.

  The DOXYGEN environment variable may point at an alternate binary.
  """
  Trace('Removing old output directory %s' % out_dirname)
  RemoveDir(out_dirname)
  Trace('Making new output directory %s' % out_dirname)
  os.makedirs(out_dirname)
  cmd = [os.environ.get('DOXYGEN', 'doxygen'), doxyfile]
  Trace('Running Doxygen:\n %s' % ' '.join(cmd))
  subprocess.check_call(cmd)
def RunDoxyCleanup(out_dirname):
  """Post-process doxygen HTML output via the doxy_cleanup.py helper."""
  cleanup_script = os.path.join(SCRIPT_DIR, 'doxy_cleanup.py')
  cmd = [sys.executable, cleanup_script, out_dirname]
  if Trace.verbose:
    # Propagate our own verbosity into the child script.
    cmd.append('-v')
  Trace('Running doxy_cleanup:\n %s' % ' '.join(cmd))
  subprocess.check_call(cmd)
def RunRstIndex(kind, channel, pepper_version, out_dirname, out_rst_filename):
  """Generate one reST index file by invoking the rst_index.py helper.

  *kind* selects which index is produced ('root', 'c' or 'cpp').
  """
  assert kind in ('root', 'c', 'cpp')
  index_script = os.path.join(SCRIPT_DIR, 'rst_index.py')
  cmd = [sys.executable, index_script, '--' + kind,
         '--channel', channel, '--version', pepper_version,
         out_dirname, out_rst_filename]
  Trace('Running rst_index:\n %s' % ' '.join(cmd))
  subprocess.check_call(cmd)
def GetRstName(kind, channel):
  """Return the doc-tree path of the .rst index for *kind* on *channel*.

  The stable channel gets the unsuffixed name (e.g. c-api.rst); other
  channels carry a channel suffix (e.g. c-api-beta.rst).
  """
  suffix = '' if channel == 'stable' else '-' + channel
  return os.path.join(DOC_DIR, '%s-api%s.rst' % (kind, suffix))
def GenerateDocs(root_dirname, channel, pepper_version, branch):
  """Generate C and C++ API docs plus reST indices for one release channel.

  Args:
    root_dirname: directory under which pepper_<channel>/ output is written.
    channel: release channel name (e.g. 'stable', 'beta', 'dev').
    pepper_version: major Chrome/Pepper version string for this channel.
    branch: Chrome branch number, or 'master' for trunk.
  """
  Trace('Generating docs for %s (branch %s)' % (channel, branch))
  pepper_dirname = 'pepper_%s' % channel
  out_dirname = os.path.join(root_dirname, pepper_dirname)
  # Create the scratch directories *before* entering the try block.
  # Previously they were created inside it, so a failure in mkdtemp left
  # the names unbound and the finally clause raised a NameError that
  # masked the original exception.
  svn_dirname = tempfile.mkdtemp(prefix=pepper_dirname)
  doxyfile_dirname = tempfile.mkdtemp(prefix='%s_doxyfiles' % pepper_dirname)
  try:
    CheckoutPepperDocs(branch, svn_dirname)
    FixPepperDocLinks(svn_dirname)
    GenerateCHeaders(pepper_version, svn_dirname)
    # Generate Root index
    rst_index_root = os.path.join(DOC_DIR, pepper_dirname, 'index.rst')
    RunRstIndex('root', channel, pepper_version, out_dirname, rst_index_root)
    # Generate C docs
    out_dirname_c = os.path.join(out_dirname, 'c')
    doxyfile_c = os.path.join(doxyfile_dirname, 'Doxyfile.c.%s' % channel)
    doxyfile_c_template = os.path.join(SCRIPT_DIR, 'Doxyfile.c.template')
    rst_index_c = GetRstName('c', channel)
    GenerateDoxyfile(doxyfile_c_template, out_dirname_c, svn_dirname,
                     doxyfile_c)
    RunDoxygen(out_dirname_c, doxyfile_c)
    RunDoxyCleanup(out_dirname_c)
    RunRstIndex('c', channel, pepper_version, out_dirname_c, rst_index_c)
    # Generate C++ docs
    out_dirname_cpp = os.path.join(out_dirname, 'cpp')
    doxyfile_cpp = os.path.join(doxyfile_dirname, 'Doxyfile.cpp.%s' % channel)
    doxyfile_cpp_template = os.path.join(SCRIPT_DIR, 'Doxyfile.cpp.template')
    rst_index_cpp = GetRstName('cpp', channel)
    GenerateDoxyfile(doxyfile_cpp_template, out_dirname_cpp, svn_dirname,
                     doxyfile_cpp)
    RunDoxygen(out_dirname_cpp, doxyfile_cpp)
    RunDoxyCleanup(out_dirname_cpp)
    RunRstIndex('cpp', channel, pepper_version, out_dirname_cpp, rst_index_cpp)
  finally:
    # Cleanup
    RemoveDir(svn_dirname)
    RemoveDir(doxyfile_dirname)
def main(argv):
  """Command-line entry point: regenerate docs for every release channel."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('-v', '--verbose',
                      help='Verbose output', action='store_true')
  parser.add_argument('out_directory')
  opts = parser.parse_args(argv)
  if opts.verbose:
    Trace.verbose = True
  # Produce one documentation tree per channel.
  for channel, info in GetChannelInfo().iteritems():
    GenerateDocs(opts.out_directory, channel, info.version, info.branch)
  return 0
# Script entry point: report main()'s status, or 1 when the user aborts
# with Ctrl-C.
if __name__ == '__main__':
  try:
    rtn = main(sys.argv[1:])
  except KeyboardInterrupt:
    sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
    rtn = 1
| bsd-3-clause |
mancoast/CPythonPyc_test | cpython/266_test_xml_etree.py | 2 | 9817 | # xml.etree test. This file contains enough tests to make sure that
# all included components work as they should. For a more extensive
# test suite, see the selftest script in the ElementTree distribution.
import doctest
import sys
from test import test_support
SAMPLE_XML = """
<body>
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
def sanity():
    # Doctest: the public xml.etree submodules can all be imported.
    """
    Import sanity.
    >>> from xml.etree import ElementTree
    >>> from xml.etree import ElementInclude
    >>> from xml.etree import ElementPath
    """
def check_method(method):
    # Doctest helper: complain on stdout (so the doctest fails) when an
    # expected Element method is missing or not callable.
    if not hasattr(method, '__call__'):
        print method, "not callable"
def serialize(ET, elem, encoding=None):
    """Serialize *elem* to a string via ET.ElementTree.write.

    A truthy *encoding* is passed through to write(); otherwise the
    default serialization is used.
    """
    import StringIO
    buf = StringIO.StringIO()
    tree = ET.ElementTree(elem)
    if not encoding:
        tree.write(buf)
    else:
        tree.write(buf, encoding)
    return buf.getvalue()
def summarize(elem):
    """Reduce an element to its tag name, for compact doctest output."""
    return getattr(elem, 'tag')
def summarize_list(seq):
    """Apply summarize() to every element of *seq* and return the list."""
    return [summarize(elem) for elem in seq]
def interface():
    # Doctest: the standard Element methods exist and the basic mutating
    # operations (append/insert/remove) serialize as expected.
    """
    Test element tree interface.
    >>> from xml.etree import ElementTree as ET
    >>> element = ET.Element("tag", key="value")
    >>> tree = ET.ElementTree(element)
    Make sure all standard element methods exist.
    >>> check_method(element.append)
    >>> check_method(element.insert)
    >>> check_method(element.remove)
    >>> check_method(element.getchildren)
    >>> check_method(element.find)
    >>> check_method(element.findall)
    >>> check_method(element.findtext)
    >>> check_method(element.clear)
    >>> check_method(element.get)
    >>> check_method(element.set)
    >>> check_method(element.keys)
    >>> check_method(element.items)
    >>> check_method(element.getiterator)
    Basic method sanity checks.
    >>> serialize(ET, element) # 1
    '<tag key="value" />'
    >>> subelement = ET.Element("subtag")
    >>> element.append(subelement)
    >>> serialize(ET, element) # 2
    '<tag key="value"><subtag /></tag>'
    >>> element.insert(0, subelement)
    >>> serialize(ET, element) # 3
    '<tag key="value"><subtag /><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(ET, element) # 4
    '<tag key="value"><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(ET, element) # 5
    '<tag key="value" />'
    >>> element.remove(subelement)
    Traceback (most recent call last):
    ValueError: list.remove(x): x not in list
    >>> serialize(ET, element) # 6
    '<tag key="value" />'
    """
def find():
    # Doctest: find/findall/findtext with plain names, XPath-like paths,
    # and namespace-qualified tags.
    """
    Test find methods (including xpath syntax).
    >>> from xml.etree import ElementTree as ET
    >>> elem = ET.XML(SAMPLE_XML)
    >>> elem.find("tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("tag").tag
    'tag'
    >>> elem.find("section/tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("section/tag").tag
    'tag'
    >>> elem.findtext("tag")
    'text'
    >>> elem.findtext("tog")
    >>> elem.findtext("tog", "default")
    'default'
    >>> ET.ElementTree(elem).findtext("tag")
    'text'
    >>> elem.findtext("section/tag")
    'subtext'
    >>> ET.ElementTree(elem).findtext("section/tag")
    'subtext'
    >>> summarize_list(elem.findall("tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall("*"))
    ['tag', 'tag', 'section']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("section/tag"))
    ['tag']
    >>> summarize_list(elem.findall("section//tag"))
    ['tag']
    >>> summarize_list(elem.findall("section/*"))
    ['tag']
    >>> summarize_list(elem.findall("section//*"))
    ['tag']
    >>> summarize_list(elem.findall("section/.//*"))
    ['tag']
    >>> summarize_list(elem.findall("*/*"))
    ['tag']
    >>> summarize_list(elem.findall("*//*"))
    ['tag']
    >>> summarize_list(elem.findall("*/tag"))
    ['tag']
    >>> summarize_list(elem.findall("*/./tag"))
    ['tag']
    >>> summarize_list(elem.findall("./tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("././tag"))
    ['tag', 'tag']
    >>> summarize_list(ET.ElementTree(elem).findall("/tag"))
    ['tag', 'tag']
    >>> summarize_list(ET.ElementTree(elem).findall("./tag"))
    ['tag', 'tag']
    >>> elem = ET.XML(SAMPLE_XML_NS)
    >>> summarize_list(elem.findall("tag"))
    []
    >>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    >>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    """
def parseliteral():
    # Doctest: parsing from literals (XML/fromstring/XMLID) and the
    # tostring/write round trips.  Raw docstring: the XML declarations
    # contain no escapes, but r"" keeps doctest output literal.
    r"""
    >>> from xml.etree import ElementTree as ET
    >>> element = ET.XML("<html><body>text</body></html>")
    >>> ET.ElementTree(element).write(sys.stdout)
    <html><body>text</body></html>
    >>> element = ET.fromstring("<html><body>text</body></html>")
    >>> ET.ElementTree(element).write(sys.stdout)
    <html><body>text</body></html>
    >>> print ET.tostring(element)
    <html><body>text</body></html>
    >>> print ET.tostring(element, "ascii")
    <?xml version='1.0' encoding='ascii'?>
    <html><body>text</body></html>
    >>> _, ids = ET.XMLID("<html><body>text</body></html>")
    >>> len(ids)
    0
    >>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
    >>> len(ids)
    1
    >>> ids["body"].tag
    'body'
    """
def check_encoding(ET, encoding):
    """
    >>> from xml.etree import ElementTree as ET
    >>> check_encoding(ET, "ascii")
    >>> check_encoding(ET, "us-ascii")
    >>> check_encoding(ET, "iso-8859-1")
    >>> check_encoding(ET, "iso-8859-15")
    >>> check_encoding(ET, "cp437")
    >>> check_encoding(ET, "mac-roman")
    """
    # Parsing a document whose declaration names *encoding* succeeds silently
    # when the parser accepts that encoding; a rejected encoding would make
    # ET.XML raise, which is what the doctests above assert cannot happen.
    ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)
#
# xinclude tests (samples from appendix C of the xinclude specification)
# In-memory stand-ins for the external resources referenced by the XInclude
# doctests below.  Keys are the href values that xinclude_loader() resolves;
# the samples mirror appendix C of the XInclude specification, so the literal
# text (including whitespace) must stay exactly as the doctests expect.
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
# Plain-text resource used by the parse="text" inclusion in C2.xml.
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
# C5 references "example.txt", which is deliberately absent from this table:
# the fallback doctest expects the loader to raise IOError.
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="samples/simple.xml"/>
</document>
"""
def xinclude_loader(href, parse="xml", encoding=None):
    # Resolve *href* against the in-memory XINCLUDE sample table instead of
    # hitting the filesystem; unknown hrefs behave like missing files.
    if href not in XINCLUDE:
        raise IOError("resource not found")
    data = XINCLUDE[href]
    if parse != "xml":
        # parse="text" inclusions get the raw string back.
        return data
    from xml.etree.ElementTree import XML
    return XML(data)
def xinclude():
    r"""
    Basic inclusion example (XInclude C.1)
    >>> from xml.etree import ElementTree as ET
    >>> from xml.etree import ElementInclude
    >>> document = xinclude_loader("C1.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    >>> print serialize(ET, document) # C1
    <document>
    <p>120 Mz is adequate for an average home user.</p>
    <disclaimer>
    <p>The opinions represented herein represent those of the individual
    and should not be interpreted as official policy endorsed by this
    organization.</p>
    </disclaimer>
    </document>
    Textual inclusion example (XInclude C.2)
    >>> document = xinclude_loader("C2.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    >>> print serialize(ET, document) # C2
    <document>
    <p>This document has been accessed
    324387 times.</p>
    </document>
    Textual inclusion of XML example (XInclude C.3)
    >>> document = xinclude_loader("C3.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    >>> print serialize(ET, document) # C3
    <document>
    <p>The following is the source of the "data.xml" resource:</p>
    <example><?xml version='1.0'?>
    <data>
    <item><![CDATA[Brooks & Shields]]></item>
    </data>
    </example>
    </document>
    Fallback example (XInclude C.5)
    Note! Fallback support is not yet implemented
    >>> document = xinclude_loader("C5.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    Traceback (most recent call last):
    IOError: resource not found
    >>> # print serialize(ET, document) # C5
    """
    # Doctest-only container covering XInclude spec appendix C samples:
    # element inclusion (C.1), text inclusion (C.2), XML-as-text (C.3) and
    # the not-yet-supported fallback case (C.5, expected to raise).  The
    # docstring content is the test itself and is kept unchanged.
def test_main():
    # Run every module-level doctest in this test module through the
    # regression-test harness; verbosity=True reports individual examples.
    from test import test_xml_etree
    test_support.run_doctest(test_xml_etree, verbosity=True)

if __name__ == '__main__':
    test_main()
| gpl-3.0 |
rsteca/python-social-auth | social/apps/pyramid_app/models.py | 67 | 2119 | """Pyramid SQLAlchemy ORM models for Social Auth"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, backref
from social.utils import setting_name, module_member
from social.storage.sqlalchemy_orm import SQLAlchemyUserMixin, \
SQLAlchemyAssociationMixin, \
SQLAlchemyNonceMixin, \
SQLAlchemyCodeMixin, \
BaseSQLAlchemyStorage
class PyramidStorage(BaseSQLAlchemyStorage):
    """Social-auth storage facade for Pyramid.

    The model attributes below are placeholders: init_social() declares the
    concrete SQLAlchemy model classes and assigns them here.
    """
    user = None         # set to UserSocialAuth by init_social()
    nonce = None        # set to Nonce by init_social()
    association = None  # set to Association by init_social()
    # Declared for consistency: init_social() also assigns
    # PyramidStorage.code, but the original class never declared the
    # placeholder, unlike the three attributes above.
    code = None
def init_social(config, Base, session):
    """Declare the social-auth SQLAlchemy models against *Base* and attach
    them to PyramidStorage.

    *config* may be a Pyramid Configurator (its registry settings are used)
    or a plain settings mapping; *session* is the SQLAlchemy session every
    generated model will use for queries.
    """
    # Accept either a Configurator or its settings dictionary.
    if hasattr(config, 'registry'):
        config = config.registry.settings
    UID_LENGTH = config.get(setting_name('UID_LENGTH'), 255)
    # The user model is configured by dotted path and imported dynamically.
    User = module_member(config[setting_name('USER_MODEL')])
    app_session = session

    class _AppSession(object):
        # Mixin giving every generated model access to the configured
        # session via the closure over app_session.
        @classmethod
        def _session(cls):
            return app_session

    class UserSocialAuth(_AppSession, Base, SQLAlchemyUserMixin):
        """Social Auth association model"""
        uid = Column(String(UID_LENGTH))
        user_id = Column(Integer, ForeignKey(User.id),
                         nullable=False, index=True)
        user = relationship(User, backref=backref('social_auth',
                                                  lazy='dynamic'))

        @classmethod
        def username_max_length(cls):
            # Mirrors the configured user model's username column length.
            return User.__table__.columns.get('username').type.length

        @classmethod
        def user_model(cls):
            return User

    class Nonce(_AppSession, Base, SQLAlchemyNonceMixin):
        """One use numbers"""
        pass

    class Association(_AppSession, Base, SQLAlchemyAssociationMixin):
        """OpenId account association"""
        pass

    class Code(_AppSession, Base, SQLAlchemyCodeMixin):
        pass

    # Set the references in the storage class
    PyramidStorage.user = UserSocialAuth
    PyramidStorage.nonce = Nonce
    PyramidStorage.association = Association
    PyramidStorage.code = Code
| bsd-3-clause |
joshuahoman/vivisect | vivisect/exc.py | 7 | 1234 |
"""
All the exception types raised by workspace APIs go here.
"""
class InvalidLocation(Exception):
    """Raised when an address does not correspond to a known location."""
    def __init__(self, va, msg=None):
        detail = "Invalid Location 0x%.8x: %s" % (va, msg)
        Exception.__init__(self, detail)
class DuplicateName(Exception):
    """Raised when the same name is applied at two different addresses."""
    def __init__(self, origva, newva, name):
        detail = "Duplicate Name: %s at 0x%.8x and 0x%.8x" % (name, origva, newva)
        Exception.__init__(self, detail)
class InvalidVaSet(Exception):
    """Raised when a VA-set name does not exist."""
    def __init__(self, name):
        detail = "Invalid Va Set Specified: %s" % name
        Exception.__init__(self, detail)
class InvalidFunction(Exception):
    """Raised when the given VA is not the entry point of a function."""
    def __init__(self, va):
        detail = "VA 0x%.8x is not a function" % va
        Exception.__init__(self, detail)
class InvalidCodeBlock(Exception):
    """Raised when the given VA is not part of any known code block."""
    def __init__(self, cbva):
        # BUG FIX: the original body formatted with the undefined name `va`
        # (the parameter is `cbva`), so constructing this exception raised
        # NameError instead of producing the intended message.
        Exception.__init__(self, 'VA 0x%.8x is not in a code block!' % cbva)
class UnknownCallingConvention(Exception):
    """Raised when a function's calling convention cannot be determined."""
    def __init__(self, fva, cc=None):
        detail = "Function 0x%.8x has unknown CallingConvention: %s" % (fva, cc)
        Exception.__init__(self, detail)
class InvalidWorkspace(Exception):
    """Raised when a storage module receives malformed data while loading
    a workspace."""
    def __init__(self, nameinfo, errinfo):
        detail = "Failed to load %s: %s" % (nameinfo, errinfo)
        Exception.__init__(self, detail)
| apache-2.0 |
cntnboys/410Lab6 | v1/lib/python2.7/site-packages/django/db/backends/mysql/introspection.py | 43 | 8585 | import re
from .base import FIELD_TYPE
from django.utils.datastructures import OrderedSet
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
from django.utils.encoding import force_text
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Maps MySQLdb FIELD_TYPE codes to the Django model field class names
    # that inspectdb uses when reverse-engineering models from a schema.
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'BigIntegerField',
        FIELD_TYPE.SHORT: 'IntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIME: 'TimeField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }

    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        cursor.execute("SHOW TABLES")
        return [row[0] for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """
        Returns a description of the table, with the DB-API cursor.description interface."
        """
        # varchar length returned by cursor.description is an internal length,
        # not visible length (#5725), use information_schema database to fix this
        cursor.execute("""
            SELECT column_name, character_maximum_length FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()
                AND character_maximum_length IS NOT NULL""", [table_name])
        length_map = dict(cursor.fetchall())

        # Also getting precision and scale from information_schema (see #5014)
        cursor.execute("""
            SELECT column_name, numeric_precision, numeric_scale FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()
                AND data_type='decimal'""", [table_name])
        numeric_map = dict((line[0], tuple(int(n) for n in line[1:])) for line in cursor.fetchall())

        # SELECT ... LIMIT 1 populates cursor.description; the per-column
        # tuples are then patched with the corrected length/precision/scale.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return [FieldInfo(*((force_text(line[0]),)
                            + line[1:3]
                            + (length_map.get(line[0], line[3]),)
                            + numeric_map.get(line[0], line[4:6])
                            + (line[6],)))
                for line in cursor.description]

    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict((d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name)))

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        my_field_dict = self._name_to_index(cursor, table_name)
        constraints = self.get_key_columns(cursor, table_name)
        relations = {}
        for my_fieldname, other_table, other_field in constraints:
            # Translate column names on both sides to 0-based positions.
            other_field_index = self._name_to_index(cursor, other_table)[other_field]
            my_field_index = my_field_dict[my_fieldname]
            relations[my_field_index] = (other_field_index, other_table)
        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []
        # Only rows with a non-NULL referenced table/column are foreign keys.
        cursor.execute("""
            SELECT column_name, referenced_table_name, referenced_column_name
            FROM information_schema.key_column_usage
            WHERE table_name = %s
                AND table_schema = DATABASE()
                AND referenced_table_name IS NOT NULL
                AND referenced_column_name IS NOT NULL""", [table_name])
        key_columns.extend(cursor.fetchall())
        return key_columns

    def get_indexes(self, cursor, table_name):
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        # Do a two-pass search for indexes: on first pass check which indexes
        # are multicolumn, on second pass check which single-column indexes
        # are present.
        rows = list(cursor.fetchall())

        multicol_indexes = set()
        for row in rows:
            # row[3] is Seq_in_index: > 1 means the index spans columns.
            if row[3] > 1:
                multicol_indexes.add(row[2])

        indexes = {}
        for row in rows:
            if row[2] in multicol_indexes:
                continue
            if row[4] not in indexes:
                indexes[row[4]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[2] == 'PRIMARY':
                indexes[row[4]]['primary_key'] = True
            # row[1] is Non_unique: 0 (falsy) means the index enforces
            # uniqueness.
            if not row[1]:
                indexes[row[4]]['unique'] = True
        return indexes

    def get_storage_engine(self, cursor, table_name):
        """
        Retrieves the storage engine for a given table.
        """
        cursor.execute(
            "SELECT engine "
            "FROM information_schema.tables "
            "WHERE table_name = %s", [table_name])
        return cursor.fetchone()[0]

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the actual constraint names and columns
        name_query = """
            SELECT kc.`constraint_name`, kc.`column_name`,
                kc.`referenced_table_name`, kc.`referenced_column_name`
            FROM information_schema.key_column_usage AS kc
            WHERE
                kc.table_schema = %s AND
                kc.table_name = %s
        """
        cursor.execute(name_query, [self.connection.settings_dict['NAME'], table_name])
        for constraint, column, ref_table, ref_column in cursor.fetchall():
            if constraint not in constraints:
                constraints[constraint] = {
                    # OrderedSet keeps the column order stable while
                    # de-duplicating columns that appear in multiple rows.
                    'columns': OrderedSet(),
                    'primary_key': False,
                    'unique': False,
                    'index': False,
                    'check': False,
                    'foreign_key': (ref_table, ref_column) if ref_column else None,
                }
            constraints[constraint]['columns'].add(column)
        # Now get the constraint types
        type_query = """
            SELECT c.constraint_name, c.constraint_type
            FROM information_schema.table_constraints AS c
            WHERE
                c.table_schema = %s AND
                c.table_name = %s
        """
        cursor.execute(type_query, [self.connection.settings_dict['NAME'], table_name])
        for constraint, kind in cursor.fetchall():
            if kind.lower() == "primary key":
                constraints[constraint]['primary_key'] = True
                constraints[constraint]['unique'] = True
            elif kind.lower() == "unique":
                constraints[constraint]['unique'] = True
        # Now add in the indexes
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        for table, non_unique, index, colseq, column in [x[:5] for x in cursor.fetchall()]:
            if index not in constraints:
                constraints[index] = {
                    'columns': OrderedSet(),
                    'primary_key': False,
                    'unique': False,
                    'index': True,
                    'check': False,
                    'foreign_key': None,
                }
            constraints[index]['index'] = True
            constraints[index]['columns'].add(column)
        # Convert the sorted sets to lists
        for constraint in constraints.values():
            constraint['columns'] = list(constraint['columns'])
        return constraints
| apache-2.0 |
ningirsu/stepmania-server | test/test_listener/test_workers/test_chat.py | 1 | 4055 | """ Test Chat worker module """
import mock
from smserver import server
from smserver.listener.workers import chat
from smserver.smutils.smpacket import smpacket
from test import utils
from test.factories.room_factory import RoomFactory
from test.factories.connection_factory import ConnectionFactory
from test.factories.user_factory import UserFactory
class ChatWorkerTest(utils.DBTest):
    """ Test listener module """

    def setUp(self):
        # Fresh server + worker under test for every case; DBTest provides
        # self.session and assertLog.
        super().setUp()
        self.server = server.StepmaniaServer()
        self.worker = chat.ChatWorker(self.server)

    @mock.patch("smserver.smutils.smthread.StepmaniaServer.sendroom")
    @mock.patch("smserver.smutils.smthread.StepmaniaServer.sendconnection")
    def test_handle_room_message(self, sendconnection, sendroom):
        """ Test handling an incoming message targeted at a room """

        room = RoomFactory(name="room_name")
        connection = ConnectionFactory()
        user = UserFactory(connection=connection, online=True, room=room)

        self.worker.handle(
            {
                "target": {"type": "room", "value": room.id},
                "message": "message",
                "source": connection.token
            },
            token=connection.token,
            session=self.session,
        )

        # A room-targeted message must broadcast to the room only.
        sendconnection.assert_not_called()
        sendroom.assert_called_once()

        self.assertEqual(sendroom.call_args[0][0], room.id)
        packet = sendroom.call_args[0][1]
        self.assertIsInstance(packet, smpacket.SMPacketServerNSCCM)
        # The rendered chat line includes room, message body and sender name.
        self.assertRegex(packet["message"], "room_name")
        self.assertRegex(packet["message"], "message")
        self.assertRegex(packet["message"], user.name)

    @mock.patch("smserver.smutils.smthread.StepmaniaServer.sendroom")
    @mock.patch("smserver.smutils.smthread.StepmaniaServer.sendconnection")
    def test_handle_user_message(self, sendconnection, sendroom):
        """ Test handling an incoming message targeted at one connection """

        connection = ConnectionFactory()
        user = UserFactory(connection=connection, online=True)

        connection_target = ConnectionFactory()

        self.worker.handle(
            {
                "target": {"type": "token", "value": connection_target.token},
                "message": "message",
                "source": connection.token
            },
            token=connection.token,
            session=self.session,
        )

        # A token-targeted message must go to that single connection only.
        sendroom.assert_not_called()
        sendconnection.assert_called_once()

        self.assertEqual(sendconnection.call_args[0][0], connection_target.token)
        packet = sendconnection.call_args[0][1]
        self.assertIsInstance(packet, smpacket.SMPacketServerNSCCM)
        self.assertRegex(packet["message"], "message")
        self.assertRegex(packet["message"], user.name)

    @mock.patch("smserver.smutils.smthread.StepmaniaServer.sendconnection")
    def test_send_message_token(self, sendconnection):
        """ Test sending a message to a specific connection """

        self.worker.send_message_token("bla", "token")
        sendconnection.assert_called_once()

        self.assertEqual(sendconnection.call_args[0][0], 'token')
        packet = sendconnection.call_args[0][1]
        self.assertEqual(packet["message"], "bla")
        self.assertIsInstance(packet, smpacket.SMPacketServerNSCCM)

    @mock.patch("smserver.smutils.smthread.StepmaniaServer.sendroom")
    def test_send_message_room(self, sendroom):
        """ Test sending a message to a specific room """

        # A missing room is logged as a warning and nothing is sent.
        self.worker.send_message_room("bla", None)
        self.assertLog("WARNING")
        sendroom.assert_not_called()

        room = RoomFactory(name="room_name")
        self.worker.send_message_room("bla", room)
        sendroom.assert_called_once()

        self.assertEqual(sendroom.call_args[0][0], room.id)
        packet = sendroom.call_args[0][1]
        self.assertIsInstance(packet, smpacket.SMPacketServerNSCCM)
        self.assertRegex(packet["message"], "room_name")
        self.assertRegex(packet["message"], "bla")
| mit |
HerlanAssis/Django-AulaOsvandoSantana | lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Serializer filter that drops start and end tags which the HTML
    specification allows to be omitted (e.g. </li>, <head>, </p>)."""

    def slider(self):
        # Yield (previous, current, next) triples over the token stream so
        # the omission rules can look one token in each direction.
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # Start tags with attributes can never be omitted.
                if (token["data"] or
                        not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if *tagname*'s start tag may be omitted in this
        position, per the HTML optional-tags rules."""
        type = next and next["type"] or None
        if tagname == 'html':
            # BUG FIX: this was `tagname in 'html'`, a substring test that
            # also matched 'h', 't', 'm', 'l', 'ht', ... and the empty string.
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceeded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceeded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if *tagname*'s end tag may be omitted in this
        position, per the HTML optional-tags rules."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
| mit |
kobejean/tensorflow | tensorflow/python/keras/layers/core.py | 4 | 38236 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import types as python_types
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.Masking')
class Masking(Layer):
  """Masks a sequence by using a mask value to skip timesteps.

  For each timestep in the input tensor (dimension #1 in the tensor),
  if all values in the input tensor at that timestep
  are equal to `mask_value`, then the timestep will be masked (skipped)
  in all downstream layers (as long as they support masking).

  If any downstream layer does not support masking yet receives such
  an input mask, an exception will be raised.

  Example:

  Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
  to be fed to an LSTM layer.
  You want to mask timestep #3 and #5 because you lack data for
  these timesteps. You can:

      - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
      - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

  ```python
  model = Sequential()
  model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
  model.add(LSTM(32))
  ```
  """

  def __init__(self, mask_value=0., **kwargs):
    super(Masking, self).__init__(**kwargs)
    self.supports_masking = True
    self.mask_value = mask_value

  def compute_mask(self, inputs, mask=None):
    # A timestep survives if any feature differs from the mask value.
    return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)

  def call(self, inputs):
    keep = K.any(
        math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
    # Zero out fully-masked timesteps so downstream layers see clean zeros.
    return inputs * math_ops.cast(keep, inputs.dtype)

  def compute_output_shape(self, input_shape):
    # Masking never changes the tensor shape.
    return input_shape

  def get_config(self):
    config = dict(super(Masking, self).get_config())
    config['mask_value'] = self.mask_value
    return config
@tf_export('keras.layers.Dropout')
class Dropout(Layer):
  """Applies Dropout to the input.

  Dropout consists in randomly setting
  a fraction `rate` of input units to 0 at each update during training time,
  which helps prevent overfitting.

  Arguments:
      rate: float between 0 and 1. Fraction of the input units to drop.
      noise_shape: 1D integer tensor representing the shape of the
          binary dropout mask that will be multiplied with the input.
          For instance, if your inputs have shape
          `(batch_size, timesteps, features)` and
          you want the dropout mask to be the same for all timesteps,
          you can use `noise_shape=(batch_size, 1, features)`.
      seed: A Python integer to use as random seed.
  """

  def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
    super(Dropout, self).__init__(**kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed
    self.supports_masking = True

  def _get_noise_shape(self, inputs):
    # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
    # which will override `self.noise_shape`, and allows for custom noise
    # shapes with dynamically sized inputs.
    if self.noise_shape is None:
      # Returns None here, meaning nn.dropout will use the input's own shape.
      return self.noise_shape
    return nn_ops._get_noise_shape(inputs, self.noise_shape)  # pylint: disable=protected-access

  def call(self, inputs, training=None):
    # Remember whether the caller passed an explicit training flag; the
    # learning-phase bookkeeping below only applies when they did not.
    original_training_value = training
    if training is None:
      training = K.learning_phase()

    def dropped_inputs():
      # nn.dropout takes keep_prob, hence 1 - rate.
      return nn.dropout(inputs, 1  - self.rate,
                        noise_shape=self._get_noise_shape(inputs),
                        seed=self.seed)
    # smart_cond picks the branch statically when `training` is a Python
    # bool, and builds a tf.cond otherwise.
    output = tf_utils.smart_cond(training,
                                 dropped_inputs,
                                 lambda: array_ops.identity(inputs))
    # EagerTensor object has no attribute _uses_learning_phase
    if not context.executing_eagerly() and original_training_value is None:
      output._uses_learning_phase = True  # pylint: disable=protected-access
    return output

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'rate': self.rate,
        'noise_shape': self.noise_shape,
        'seed': self.seed
    }
    base_config = super(Dropout, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
  """Spatial 1D version of Dropout.

  Drops entire 1D feature maps instead of individual elements. When
  adjacent frames within feature maps are strongly correlated (as is
  normally the case in early convolution layers), regular dropout will not
  regularize the activations and will otherwise just result in an effective
  learning rate decrease; SpatialDropout1D promotes independence between
  feature maps and should be used instead.

  Arguments:
      rate: float between 0 and 1. Fraction of the input units to drop.

  Input shape:
      3D tensor with shape:
      `(samples, timesteps, channels)`

  Output shape:
      Same as input

  References:
      - [Efficient Object Localization Using Convolutional
        Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, **kwargs):
    super(SpatialDropout1D, self).__init__(rate, **kwargs)
    self.input_spec = InputSpec(ndim=3)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    # Broadcast the mask over the timestep axis so a whole channel is
    # dropped together.
    return (shape[0], 1, shape[2])
@tf_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
  """Spatial 2D version of Dropout.

  Drops entire 2D feature maps instead of individual elements. When
  adjacent pixels within feature maps are strongly correlated (as is
  normally the case in early convolution layers), regular dropout will not
  regularize the activations and will otherwise just result in an effective
  learning rate decrease; SpatialDropout2D promotes independence between
  feature maps and should be used instead.

  Arguments:
      rate: float between 0 and 1. Fraction of the input units to drop.
      data_format: 'channels_first' or 'channels_last'.
          In 'channels_first' mode, the channels dimension
          (the depth) is at index 1,
          in 'channels_last' mode is it at index 3.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".

  Input shape:
      4D tensor with shape:
      `(samples, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      Same as input

  References:
      - [Efficient Object Localization Using Convolutional
        Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout2D, self).__init__(rate, **kwargs)
    if data_format is None:
      data_format = K.image_data_format()
    if data_format not in ('channels_last', 'channels_first'):
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = data_format
    self.input_spec = InputSpec(ndim=4)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    # Broadcast the mask over the spatial axes so a whole feature map is
    # dropped together; only the channel axis position differs per format.
    if self.data_format == 'channels_first':
      return (shape[0], shape[1], 1, 1)
    return (shape[0], 1, 1, shape[3])
@tf_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
  """Spatial 3D version of Dropout.

  Behaves like `Dropout` but drops whole 3D feature maps rather than
  individual activations. When adjacent voxels within a feature map are
  strongly correlated (typical in early convolution layers), element-wise
  dropout barely regularizes and mostly lowers the effective learning
  rate; dropping entire maps restores independence between them.

  Arguments:
      rate: float between 0 and 1. Fraction of the input units to drop.
      data_format: 'channels_first' or 'channels_last'. In
          'channels_first' mode the channels dimension (the depth) is at
          index 1; in 'channels_last' mode it is at index 4. Defaults to
          the `image_data_format` value found in your Keras config file at
          `~/.keras/keras.json`, or "channels_last" if never set.

  Input shape:
      5D tensor with shape:
      `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
      or 5D tensor with shape:
      `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.

  Output shape:
      Same as input

  References:
      - [Efficient Object Localization Using Convolutional
        Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout3D, self).__init__(rate, **kwargs)
    resolved = K.image_data_format() if data_format is None else data_format
    if resolved not in {'channels_last', 'channels_first'}:
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = resolved
    self.input_spec = InputSpec(ndim=5)

  def _get_noise_shape(self, inputs):
    # Broadcast the dropout mask over all three spatial axes so each
    # feature map is kept or dropped as a whole.
    shape = array_ops.shape(inputs)
    if self.data_format == 'channels_first':
      return (shape[0], shape[1], 1, 1, 1)
    elif self.data_format == 'channels_last':
      return (shape[0], 1, 1, 1, shape[4])
@tf_export('keras.layers.Activation')
class Activation(Layer):
  """Applies an activation function element-wise to its input.

  Arguments:
      activation: name of an activation function to use, or alternatively
          a Theano or TensorFlow operation.

  Input shape:
      Arbitrary. Use the keyword argument `input_shape` (tuple of
      integers, does not include the samples axis) when using this layer
      as the first layer in a model.

  Output shape:
      Same shape as input.
  """

  def __init__(self, activation, **kwargs):
    super(Activation, self).__init__(**kwargs)
    self.supports_masking = True
    # Resolve the identifier (string / callable) into a callable once.
    self.activation = activations.get(activation)

  def call(self, inputs):
    return self.activation(inputs)

  def compute_output_shape(self, input_shape):
    # Element-wise op: shape is unchanged.
    return input_shape

  def get_config(self):
    config = dict(super(Activation, self).get_config())
    config['activation'] = activations.serialize(self.activation)
    return config
@tf_export('keras.layers.Reshape')
class Reshape(Layer):
  """Reshapes an output to a certain shape.

  Arguments:
    target_shape: target shape. Tuple of integers,
      does not include the samples dimension (batch size).
      At most one entry may be `-1`, meaning "infer this dimension".

  Input shape:
    Arbitrary, although all dimensions in the input shape must be fixed.
    Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    `(batch_size,) + target_shape`

  Example:

  ```python
  # as first layer in a Sequential model
  model = Sequential()
  model.add(Reshape((3, 4), input_shape=(12,)))
  # now: model.output_shape == (None, 3, 4)
  # note: `None` is the batch dimension

  # as intermediate layer in a Sequential model
  model.add(Reshape((6, 2)))
  # now: model.output_shape == (None, 6, 2)

  # also supports shape inference using `-1` as dimension
  model.add(Reshape((-1, 2, 2)))
  # now: model.output_shape == (None, 3, 2, 2)
  ```
  """

  def __init__(self, target_shape, **kwargs):
    super(Reshape, self).__init__(**kwargs)
    self.target_shape = tuple(target_shape)

  def _fix_unknown_dimension(self, input_shape, output_shape):
    """Find and replace a missing dimension in an output shape.

    This is a near direct port of the internal Numpy function
    `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`

    Arguments:
      input_shape: shape of array being reshaped
      output_shape: desired shape of the array with at most
        a single -1 which indicates a dimension that should be
        derived from the input shape.

    Returns:
      The new output shape with a -1 replaced with its computed value.

      Raises a ValueError if the total array size of the output_shape is
      different than the input_shape, or more than one unknown dimension
      is specified.

    Raises:
      ValueError: in case of invalid values
        for `input_shape` or `output_shape`.
    """
    output_shape = list(output_shape)
    msg = 'total size of new array must be unchanged'

    # `known` accumulates the product of all explicit dimensions;
    # `unknown` remembers the index of the single -1 entry, if any.
    known, unknown = 1, None
    for index, dim in enumerate(output_shape):
      if dim < 0:
        if unknown is None:
          unknown = index
        else:
          raise ValueError('Can only specify one unknown dimension.')
      else:
        known *= dim

    original = np.prod(input_shape, dtype=int)
    if unknown is not None:
      # The inferred dimension must divide the total size exactly.
      if known == 0 or original % known != 0:
        raise ValueError(msg)
      output_shape[unknown] = original // known
    elif original != known:
      raise ValueError(msg)
    return output_shape

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if None in input_shape[1:]:
      output_shape = [input_shape[0]]
      # input shape (partially) unknown? replace -1's with None's
      output_shape += tuple(s if s != -1 else None for s in self.target_shape)
    else:
      output_shape = [input_shape[0]]
      # Fully known input: resolve a possible -1 in the target shape.
      output_shape += self._fix_unknown_dimension(input_shape[1:],
                                                  self.target_shape)
    return tensor_shape.TensorShape(output_shape)

  def call(self, inputs):
    # Keep the (possibly dynamic) batch dimension; reshape the rest.
    return array_ops.reshape(inputs,
                             (array_ops.shape(inputs)[0],) + self.target_shape)

  def get_config(self):
    config = {'target_shape': self.target_shape}
    base_config = super(Reshape, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Permute')
class Permute(Layer):
  """Permutes the dimensions of the input according to a given pattern.

  Useful e.g. for connecting RNNs and convnets together.

  Example:

  ```python
  model = Sequential()
  model.add(Permute((2, 1), input_shape=(10, 64)))
  # now: model.output_shape == (None, 64, 10)
  # note: `None` is the batch dimension
  ```

  Arguments:
      dims: Tuple of integers. Permutation pattern, does not include the
          samples dimension. Indexing starts at 1. For instance, `(2, 1)`
          permutes the first and second dimensions of the input.

  Input shape:
      Arbitrary. Use the keyword argument `input_shape` (tuple of
      integers, does not include the samples axis) when using this layer
      as the first layer in a model.

  Output shape:
      Same as the input shape, but with the dimensions re-ordered
      according to the specified pattern.
  """

  def __init__(self, dims, **kwargs):
    super(Permute, self).__init__(**kwargs)
    self.dims = tuple(dims)
    # `dims` must be exactly the set {1, ..., len(dims)} in some order.
    if sorted(dims) != list(range(1, len(dims) + 1)):
      raise ValueError(
          'Invalid permutation `dims` for Permute Layer: %s. '
          'The set of indices in `dims` must be consecutive and start from 1.' %
          (dims,))
    self.input_spec = InputSpec(ndim=len(self.dims) + 1)

  def compute_output_shape(self, input_shape):
    dims_list = tensor_shape.TensorShape(input_shape).as_list()
    permuted = copy.copy(dims_list)
    # Axis 0 (batch) is fixed; output axis `position` takes its size from
    # input axis `source_axis` (1-based, as given by the user).
    for position, source_axis in enumerate(self.dims, start=1):
      permuted[position] = dims_list[source_axis]
    return tensor_shape.TensorShape(permuted)

  def call(self, inputs):
    return array_ops.transpose(inputs, perm=(0,) + self.dims)

  def get_config(self):
    config = dict(super(Permute, self).get_config())
    config['dims'] = self.dims
    return config
@tf_export('keras.layers.Flatten')
class Flatten(Layer):
  """Flattens the input. Does not affect the batch size.

  Arguments:
      data_format: A string, one of `channels_last` (default) or
          `channels_first`. The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, ..., channels)` while `channels_first` corresponds to
          inputs with shape `(batch, channels, ...)`. Defaults to the
          `image_data_format` value found in your Keras config file at
          `~/.keras/keras.json`, or "channels_last" if never set.

  Example:

  ```python
  model = Sequential()
  model.add(Convolution2D(64, 3, 3,
                          border_mode='same',
                          input_shape=(3, 32, 32)))
  # now: model.output_shape == (None, 64, 32, 32)

  model.add(Flatten())
  # now: model.output_shape == (None, 65536)
  ```
  """

  def __init__(self, data_format=None, **kwargs):
    super(Flatten, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(min_ndim=2)

  def call(self, inputs):
    if self.data_format == 'channels_first':
      # Move channels to the last position first, so flattening produces
      # the same element ordering as for channels_last inputs.
      permutation = [0] + [i for i in range(2, K.ndim(inputs))] + [1]
      inputs = array_ops.transpose(inputs, perm=permutation)
    flattened = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
    if not context.executing_eagerly():
      # Restore static shape information lost by the dynamic reshape.
      flattened.set_shape(self.compute_output_shape(inputs.get_shape()))
    return flattened

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    if all(dims[1:]):
      flat_dim = np.prod(dims[1:])
    else:
      # Any unknown non-batch dimension makes the flattened size unknown.
      flat_dim = None
    return tensor_shape.TensorShape([dims[0], flat_dim])

  def get_config(self):
    config = dict(super(Flatten, self).get_config())
    config['data_format'] = self.data_format
    return config
@tf_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
  """Repeats the input n times.

  Example:

  ```python
  model = Sequential()
  model.add(Dense(32, input_dim=32))
  # now: model.output_shape == (None, 32)
  # note: `None` is the batch dimension

  model.add(RepeatVector(3))
  # now: model.output_shape == (None, 3, 32)
  ```

  Arguments:
      n: integer, repetition factor.

  Input shape:
      2D tensor of shape `(num_samples, features)`.

  Output shape:
      3D tensor of shape `(num_samples, n, features)`.
  """

  def __init__(self, n, **kwargs):
    super(RepeatVector, self).__init__(**kwargs)
    self.n = n
    self.input_spec = InputSpec(ndim=2)

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    # A new axis of length `n` is inserted between batch and features.
    return tensor_shape.TensorShape([dims[0], self.n, dims[1]])

  def call(self, inputs):
    return K.repeat(inputs, self.n)

  def get_config(self):
    config = dict(super(RepeatVector, self).get_config())
    config['n'] = self.n
    return config
@tf_export('keras.layers.Lambda')
class Lambda(Layer):
  """Wraps arbitrary expression as a `Layer` object.

  Examples:

  ```python
  # add a x -> x^2 layer
  model.add(Lambda(lambda x: x ** 2))
  ```
  ```python
  # add a layer that returns the concatenation
  # of the positive part of the input and
  # the opposite of the negative part

  def antirectifier(x):
      x -= K.mean(x, axis=1, keepdims=True)
      x = K.l2_normalize(x, axis=1)
      pos = K.relu(x)
      neg = K.relu(-x)
      return K.concatenate([pos, neg], axis=1)

  model.add(Lambda(antirectifier))
  ```

  Arguments:
      function: The function to be evaluated.
          Takes input tensor as first argument.
      output_shape: Expected output shape from function.
          This argument can be inferred if not explicitly provided.
          Can be a tuple or function.
          If a tuple, it only specifies the first dimension onward;
          sample dimension is assumed either the same as the input:
          `output_shape = (input_shape[0], ) + output_shape`
          or, the input is `None` and
          the sample dimension is also `None`:
          `output_shape = (None, ) + output_shape`
          If a function, it specifies the entire shape as a function of the
          input shape: `output_shape = f(input_shape)`
      arguments: optional dictionary of keyword arguments to be passed
          to the function.

  Input shape:
      Arbitrary. Use the keyword argument input_shape
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

  Output shape:
      Specified by `output_shape` argument
  """

  def __init__(self, function, output_shape=None, mask=None, arguments=None,
               **kwargs):
    super(Lambda, self).__init__(**kwargs)
    self.function = function
    self.arguments = arguments if arguments else {}
    if mask is not None:
      self.supports_masking = True
    self.mask = mask

    if (output_shape is not None and not isinstance(output_shape,
                                                    (tuple, list)) and
        not callable(output_shape)):
      raise TypeError('In Lambda, `output_shape` '
                      'must be a list, a tuple, or a function.')

    # Convert a list representing a single shape into a tuple.
    if (isinstance(output_shape, list) and isinstance(output_shape[0],
                                                      (int, type(None)))):
      output_shape = tuple(output_shape)

    self._output_shape = output_shape

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    """Compute output shape(s), inferring them when none were given.

    Three cases:
      * `output_shape` is None: infer by calling the function on
        placeholders (graph mode) or delegate to the base class (eager).
      * `output_shape` is a tuple/list: prepend the batch dimension.
      * `output_shape` is callable: call it on `input_shape`.
    """
    if self._output_shape is None:
      if context.executing_eagerly():
        # Make use of existing autocomputation for Eager mode but provide
        # Lambda-specific error message.
        try:
          return super(Lambda, self).compute_output_shape(input_shape)
        except NotImplementedError:
          raise NotImplementedError('We could not automatically infer '
                                    'the static shape of the Lambda\'s output.'
                                    ' Please specify the `output_shape` for'
                                    ' this Lambda.')
      # Graph mode: run the function symbolically on placeholders and
      # read the static shape(s) off the result.
      if isinstance(input_shape, list):
        x = [K.placeholder(shape=shape) for shape in input_shape]
      else:
        x = K.placeholder(shape=input_shape)
      x = self.call(x)
      if isinstance(x, list):
        return [tensor_shape.TensorShape(K.int_shape(x_elem)) for x_elem in x]
      else:
        return tensor_shape.TensorShape(K.int_shape(x))
    elif isinstance(self._output_shape, (tuple, list)):
      if isinstance(input_shape, list):
        num_samples = input_shape[0][0]
      else:
        num_samples = input_shape[0] if input_shape else None
      # List here represents multiple outputs.
      if isinstance(self._output_shape, list):
        return [
            tensor_shape.TensorShape((num_samples,) + tuple(single_shape))
            for single_shape in self._output_shape
        ]
      return tensor_shape.TensorShape((num_samples,) + self._output_shape)
    else:
      shape = self._output_shape(input_shape)
      if not isinstance(shape, (list, tuple)):
        raise ValueError(
            '`output_shape` function must return a tuple or a list of tuples.')
      # List here can represent multiple outputs or single output.
      if isinstance(shape, list):
        # Convert list representing single output into a tuple.
        if isinstance(shape[0], (int, type(None))):
          shape = tuple(shape)
        else:
          return [
              tensor_shape.TensorShape(single_shape) for single_shape in shape
          ]
      return tensor_shape.TensorShape(shape)

  def call(self, inputs, mask=None):
    arguments = self.arguments
    if generic_utils.has_arg(self.function, 'mask'):
      # BUG FIX: work on a copy. Assigning into `self.arguments` here
      # would leak the per-call mask tensor into the layer's stored
      # arguments, which `get_config()` then tries to serialize.
      arguments = dict(arguments)
      arguments['mask'] = mask
    return self.function(inputs, **arguments)

  def compute_mask(self, inputs, mask=None):
    # `mask` may be a static value or a callable computing the mask.
    if callable(self.mask):
      return self.mask(inputs, mask)
    return self.mask

  def get_config(self):
    """Serialize the wrapped function and output_shape (by value or name)."""
    module = self.function.__module__
    if isinstance(self.function, python_types.LambdaType):
      # Lambdas have no importable name; marshal their bytecode instead.
      function = generic_utils.func_dump(self.function)
      function_type = 'lambda'
    else:
      function = self.function.__name__
      function_type = 'function'

    output_shape_module = None
    if isinstance(self._output_shape, python_types.LambdaType):
      output_shape = generic_utils.func_dump(self._output_shape)
      output_shape_type = 'lambda'
      output_shape_module = self._output_shape.__module__
    elif callable(self._output_shape):
      output_shape = self._output_shape.__name__
      output_shape_type = 'function'
      output_shape_module = self._output_shape.__module__
    else:
      output_shape = self._output_shape
      output_shape_type = 'raw'

    config = {
        'function': function,
        'module': module,
        'function_type': function_type,
        'output_shape': output_shape,
        'output_shape_type': output_shape_type,
        'output_shape_module': output_shape_module,
        'arguments': self.arguments
    }
    base_config = super(Lambda, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Rebuild a Lambda from `get_config()` output.

    Looks up plain functions by name (in the recorded module, then in
    `custom_objects`); lambdas are revived from marshaled bytecode, which
    is inherently unsafe for untrusted configs.
    """
    config = config.copy()
    globs = globals()
    module = config.pop('module', None)
    if module in sys.modules:
      globs.update(sys.modules[module].__dict__)
    elif module is not None:
      # Note: we don't know the name of the function if it's a lambda.
      warnings.warn('{} is not loaded, but a Lambda layer uses it. '
                    'It may cause errors.'.format(module)
                    , UserWarning)
    if custom_objects:
      globs.update(custom_objects)
    function_type = config.pop('function_type')
    if function_type == 'function':
      # Simple lookup in custom objects
      function = generic_utils.deserialize_keras_object(
          config['function'],
          custom_objects=custom_objects,
          printable_module_name='function in Lambda layer')
    elif function_type == 'lambda':
      # Unsafe deserialization from bytecode
      function = generic_utils.func_load(config['function'], globs=globs)
    else:
      raise TypeError('Unknown function type:', function_type)

    output_shape_module = config.pop('output_shape_module', None)
    if output_shape_module in sys.modules:
      globs.update(sys.modules[output_shape_module].__dict__)
    elif output_shape_module is not None:
      # Note: we don't know the name of the function if it's a lambda.
      warnings.warn('{} is not loaded, but a Lambda layer uses it. '
                    'It may cause errors.'.format(output_shape_module)
                    , UserWarning)
    output_shape_type = config.pop('output_shape_type')
    if output_shape_type == 'function':
      # Simple lookup in custom objects
      output_shape = generic_utils.deserialize_keras_object(
          config['output_shape'],
          custom_objects=custom_objects,
          printable_module_name='output_shape function in Lambda layer')
    elif output_shape_type == 'lambda':
      # Unsafe deserialization from bytecode
      output_shape = generic_utils.func_load(config['output_shape'],
                                             globs=globs)
    else:
      output_shape = config['output_shape']

    # If arguments were numpy array, they have been saved as
    # list. We need to recover the ndarray
    if 'arguments' in config:
      for key in config['arguments']:
        if isinstance(config['arguments'][key], dict):
          arg_dict = config['arguments'][key]
          if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
            # Overwrite the argument with its numpy translation
            config['arguments'][key] = np.array(arg_dict['value'])

    config['function'] = function
    config['output_shape'] = output_shape
    return cls(**config)
@tf_export('keras.layers.Dense')
class Dense(Layer):
  """Just your regular densely-connected NN layer.

  `Dense` implements the operation:
  `output = activation(dot(input, kernel) + bias)`
  where `activation` is the element-wise activation function passed as the
  `activation` argument, `kernel` is a weights matrix created by the
  layer, and `bias` is a bias vector created by the layer (only applicable
  if `use_bias` is `True`).

  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial dot product with `kernel`.

  Example:

  ```python
  # as first layer in a sequential model:
  model = Sequential()
  model.add(Dense(32, input_shape=(16,)))
  # now the model will take as input arrays of shape (*, 16)
  # and output arrays of shape (*, 32)

  # after the first layer, you don't need to specify
  # the size of the input anymore:
  model.add(Dense(32))
  ```

  Arguments:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use. If you don't specify
          anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to the `kernel`
          weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to the output of
          the layer (its "activation").
      kernel_constraint: Constraint function applied to the `kernel`
          weights matrix.
      bias_constraint: Constraint function applied to the bias vector.

  Input shape:
      nD tensor with shape: `(batch_size, ..., input_dim)`. The most
      common situation would be a 2D input with shape
      `(batch_size, input_dim)`.

  Output shape:
      nD tensor with shape: `(batch_size, ..., units)`. For instance, for
      a 2D input with shape `(batch_size, input_dim)`, the output would
      have shape `(batch_size, units)`.
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # `input_dim` is a legacy alias for a 1-D `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    # Resolve all string/callable identifiers into objects up front.
    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)

  def build(self, input_shape):
    """Create `kernel` (and optionally `bias`) once the input dim is known."""
    input_shape = tensor_shape.TensorShape(input_shape)
    last_dim = input_shape[-1].value  # TF1 Dimension -> int or None
    if last_dim is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    # Pin the now-known innermost dimension for all subsequent calls.
    self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    rank = common_shapes.rank(inputs)
    if rank > 2:
      # Rank > 2: contract the innermost axis of `inputs` against the
      # first axis of `kernel`, broadcasting over the leading axes.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
      if not context.executing_eagerly():
        # tensordot loses static shape information in graph mode;
        # restore it from the input's static shape.
        static_shape = inputs.get_shape().as_list()
        outputs.set_shape(static_shape[:-1] + [self.units])
    else:
      outputs = gen_math_ops.mat_mul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      outputs = self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    # Replace the innermost dimension with `units`.
    return input_shape[:-1].concatenate(self.units)

  def get_config(self):
    config = dict(super(Dense, self).get_config())
    config.update({
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    })
    return config
@tf_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
  """Layer that applies an update to the cost function based on input activity.

  Arguments:
      l1: L1 regularization factor (positive float).
      l2: L2 regularization factor (positive float).

  Input shape:
      Arbitrary. Use the keyword argument `input_shape` (tuple of
      integers, does not include the samples axis) when using this layer
      as the first layer in a model.

  Output shape:
      Same shape as input.
  """

  def __init__(self, l1=0., l2=0., **kwargs):
    # All of the work happens through the base class's
    # activity_regularizer mechanism; this layer is otherwise identity.
    super(ActivityRegularization, self).__init__(
        activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
    self.supports_masking = True
    self.l1 = l1
    self.l2 = l2

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = dict(super(ActivityRegularization, self).get_config())
    config.update({'l1': self.l1, 'l2': self.l2})
    return config
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/models/ipa.py | 2 | 1175 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IPA(Model):
    """IP Address details.

    :param sub_type: Subtype of the detected IP Address.
    :type sub_type: str
    :param text: Detected IP Address.
    :type text: str
    :param index: Index (location) of the IP Address in the input text
     content.
    :type index: int
    """

    # Maps Python attribute names to the wire-format keys and types used
    # by msrest (de)serialization.
    _attribute_map = {
        'sub_type': {'key': 'SubType', 'type': 'str'},
        'text': {'key': 'Text', 'type': 'str'},
        'index': {'key': 'Index', 'type': 'int'},
    }

    def __init__(self, sub_type=None, text=None, index=None):
        super(IPA, self).__init__()
        self.index = index
        self.text = text
        self.sub_type = sub_type
| mit |
mancoast/CPythonPyc_test | fail/342_test_fork1.py | 84 | 3783 | """This test checks for correct fork() behavior.
"""
import _imp as imp
import os
import signal
import sys
import time
from test.fork_wait import ForkWait
from test.support import (run_unittest, reap_children, get_attribute,
import_module, verbose)
# Both guards below abort the whole test module with SkipTest when the
# platform lacks the required feature.
threading = import_module('threading')

# Skip test if fork does not exist.
get_attribute(os, 'fork')
class ForkTest(ForkWait):
    """Fork behavior tests, including interaction with the import lock."""

    def wait_impl(self, cpid):
        # Poll with WNOHANG up to ~10 seconds instead of blocking forever.
        for i in range(10):
            # waitpid() shouldn't hang, but some of the buildbots seem to hang
            # in the forking tests.  This is an attempt to fix the problem.
            spid, status = os.waitpid(cpid, os.WNOHANG)
            if spid == cpid:
                break
            time.sleep(1.0)

        self.assertEqual(spid, cpid)
        self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))

    def test_threaded_import_lock_fork(self):
        """Check fork() in main thread works while a subthread is doing an import"""
        import_started = threading.Event()
        fake_module_name = "fake test module"
        partial_module = "partial"
        complete_module = "complete"
        def importer():
            # Simulate a slow import: hold the import lock and expose a
            # half-initialized module while the main thread forks.
            imp.acquire_lock()
            sys.modules[fake_module_name] = partial_module
            import_started.set()
            time.sleep(0.01) # Give the other thread time to try and acquire.
            sys.modules[fake_module_name] = complete_module
            imp.release_lock()
        t = threading.Thread(target=importer)
        t.start()
        import_started.wait()
        pid = os.fork()
        try:
            # PyOS_BeforeFork should have waited for the import to complete
            # before forking, so the child can recreate the import lock
            # correctly, but also won't see a partially initialised module
            if not pid:
                m = __import__(fake_module_name)
                if m == complete_module:
                    os._exit(0)
                else:
                    if verbose > 1:
                        print("Child encountered partial module")
                    os._exit(1)
            else:
                t.join()
                # Exitcode 1 means the child got a partial module (bad.) No
                # exitcode (but a hang, which manifests as 'got pid 0')
                # means the child deadlocked (also bad.)
                self.wait_impl(pid)
        finally:
            # Best-effort cleanup: make sure no child outlives the test.
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError:
                pass

    def test_nested_import_lock_fork(self):
        """Check fork() in main thread works while the main thread is doing an import"""
        # Issue 9573: this used to trigger RuntimeError in the child process
        def fork_with_import_lock(level):
            # Acquire the import lock `level` times, fork while holding it,
            # then release; `release` tracks how many acquisitions succeeded
            # so the finally block releases exactly that many.
            release = 0
            in_child = False
            try:
                try:
                    for i in range(level):
                        imp.acquire_lock()
                        release += 1
                    pid = os.fork()
                    in_child = not pid
                finally:
                    for i in range(release):
                        imp.release_lock()
            except RuntimeError:
                if in_child:
                    if verbose > 1:
                        print("RuntimeError in child")
                    os._exit(1)
                raise
            if in_child:
                os._exit(0)
            # Only the parent reaches this point (the child has exited).
            self.wait_impl(pid)

        # Check this works with various levels of nested
        # import in the main thread
        for level in range(5):
            fork_with_import_lock(level)
def test_main():
    # Run the ForkTest cases, then reap any child processes the forks may
    # have left behind so they don't outlive the test run.
    run_unittest(ForkTest)
    reap_children()

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
github-account-because-they-want-it/django | django/core/files/locks.py | 725 | 3516 | """
Portable file locking utilities.
Based partially on an example by Jonathan Feignberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
...     f.write(b'Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
# Platform dispatch: Windows gets a ctypes LockFileEx implementation;
# POSIX uses fcntl.flock; anything else falls back to no-op stubs.
if os.name == 'nt':
    import msvcrt
    from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
                        Structure, Union, POINTER, windll, byref)
    from ctypes.wintypes import BOOL, DWORD, HANDLE

    LOCK_SH = 0  # the default
    LOCK_NB = 0x1  # LOCKFILE_FAIL_IMMEDIATELY
    LOCK_EX = 0x2  # LOCKFILE_EXCLUSIVE_LOCK

    # --- Adapted from the pyserial project ---
    # detect size of ULONG_PTR
    if sizeof(c_ulong) != sizeof(c_void_p):
        ULONG_PTR = c_int64
    else:
        ULONG_PTR = c_ulong
    PVOID = c_void_p

    # --- Union inside Structure by stackoverflow:3480240 ---
    # These mirror the Win32 OVERLAPPED struct layout expected by
    # LockFileEx/UnlockFileEx.
    class _OFFSET(Structure):
        _fields_ = [
            ('Offset', DWORD),
            ('OffsetHigh', DWORD)]

    class _OFFSET_UNION(Union):
        _anonymous_ = ['_offset']
        _fields_ = [
            ('_offset', _OFFSET),
            ('Pointer', PVOID)]

    class OVERLAPPED(Structure):
        _anonymous_ = ['_offset_union']
        _fields_ = [
            ('Internal', ULONG_PTR),
            ('InternalHigh', ULONG_PTR),
            ('_offset_union', _OFFSET_UNION),
            ('hEvent', HANDLE)]

    LPOVERLAPPED = POINTER(OVERLAPPED)

    # --- Define function prototypes for extra safety ---
    LockFileEx = windll.kernel32.LockFileEx
    LockFileEx.restype = BOOL
    LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
    UnlockFileEx = windll.kernel32.UnlockFileEx
    UnlockFileEx.restype = BOOL
    UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]

    def lock(f, flags):
        # 0xFFFF0000 is the byte-range length to lock, starting at offset 0.
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)

    def unlock(f):
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)
else:
    try:
        import fcntl
        LOCK_SH = fcntl.LOCK_SH  # shared lock
        LOCK_NB = fcntl.LOCK_NB  # non-blocking
        LOCK_EX = fcntl.LOCK_EX
    except (ImportError, AttributeError):
        # File locking is not supported.
        LOCK_EX = LOCK_SH = LOCK_NB = 0

        # Dummy functions that don't do anything.
        def lock(f, flags):
            # File is not locked
            return False

        def unlock(f):
            # File is unlocked
            return True
    else:
        def lock(f, flags):
            ret = fcntl.flock(_fd(f), flags)
            return (ret == 0)

        def unlock(f):
            ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
            return (ret == 0)
eino-makitalo/odoo | addons/hr_timesheet_sheet/__openerp__.py | 259 | 2886 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Timesheets',
'version': '1.0',
'category': 'Human Resources',
'sequence': 24,
'summary': 'Timesheets, Attendances, Activities',
'description': """
Record and validate timesheets and attendances easily
=====================================================
This application supplies a new screen enabling you to manage both attendances (Sign in/Sign out) and your work encoding (timesheet) by period. Timesheet entries are made by employees each day. At the end of the defined period, employees validate their sheet and the manager must then approve his team's entries. Periods are defined in the company forms and you can set them to run monthly or weekly.
The complete timesheet validation process is:
---------------------------------------------
* Draft sheet
* Confirmation at the end of the period by the employee
* Validation by the project manager
The validation can be configured in the company:
------------------------------------------------
* Period size (Day, Week, Month)
* Maximal difference between timesheet and attendances
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['hr_timesheet', 'hr_timesheet_invoice'],
'data': [
'security/ir.model.access.csv',
'security/hr_timesheet_sheet_security.xml',
'hr_timesheet_sheet_view.xml',
'hr_timesheet_workflow.xml',
'report/hr_timesheet_report_view.xml',
'wizard/hr_timesheet_current_view.xml',
'hr_timesheet_sheet_data.xml',
'res_config_view.xml',
'views/hr_timesheet_sheet.xml',
],
'demo': ['hr_timesheet_sheet_demo.xml'],
'test':['test/test_hr_timesheet_sheet.yml'],
'installable': True,
'auto_install': False,
'application': True,
'qweb': ['static/src/xml/timesheet.xml',],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dendisuhubdy/tensorflow | tensorflow/contrib/linear_optimizer/python/sdca_estimator.py | 39 | 21783 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import session_run_hook
def _head_is_valid_for_sdca(head):
  """Checks whether `head` is one of the head types SDCAOptimizer supports."""
  # pylint: disable=protected-access
  supported_heads = (head_lib._BinaryLogisticHead, head_lib._BinarySvmHead,
                     head_lib._RegressionHead)
  # pylint: enable=protected-access
  return isinstance(head, supported_heads)
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
columns_to_variables):
"""Adds a fake bias feature column filled with all 1s."""
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
if any(col.name is bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
if not feature_columns:
raise ValueError("feature_columns can't be empty.")
# Loop through input tensors until we can figure out batch_size.
batch_size = None
for column in columns_to_tensors.values():
if isinstance(column, tuple):
column = column[0]
if isinstance(column, sparse_tensor.SparseTensor):
shape = tensor_util.constant_value(column.dense_shape)
if shape is not None:
batch_size = shape[0]
break
else:
batch_size = array_ops.shape(column)[0]
break
if batch_size is None:
raise ValueError("Could not infer batch size from input features.")
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones(
[batch_size, 1], dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
def sdca_model_fn(features, labels, mode, params, config=None):
  """A model_fn for linear models that use the SDCA optimizer.
  Args:
    features: A dict of `Tensor` keyed by column name.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` with values in the set {0, 1}.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance. Type must be one of `_BinarySvmHead`,
        `_RegressionHead` or `_BinaryLogisticHead`.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * l1_regularization: Global (across all examples) L1-regularization
        parameter.
      * l2_regularization: Global (across all examples) L2-regularization
        parameter.
      * num_loss_partitions: Number of partitions of the global loss function
        optimized by `SDCAOptimizer`.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
        model weights.
    config: `RunConfig` object to configure the runtime settings.
  Returns:
    A `ModelFnOps` instance.
  Raises:
    ValueError: If the type of head is not one of `_BinarySvmHead`,
      `_RegressionHead` or `_MultiClassHead`.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  example_id_column = params["example_id_column"]
  l1_regularization = params["l1_regularization"]
  l2_regularization = params["l2_regularization"]
  num_loss_partitions = params["num_loss_partitions"]
  weight_column_name = params["weight_column_name"]
  update_weights_hook = params.get("update_weights_hook", None)
  partitioner = params["partitioner"]
  # The SDCA loss type is derived from the head flavor; only these three
  # single-logit heads are supported.
  loss_type = None
  if isinstance(head, head_lib._BinarySvmHead):  # pylint: disable=protected-access
    loss_type = "hinge_loss"
  elif isinstance(head, head_lib._BinaryLogisticHead):  # pylint: disable=protected-access
    loss_type = "logistic_loss"
  elif isinstance(head, head_lib._RegressionHead):  # pylint: disable=protected-access
    loss_type = "squared_loss"
  else:
    raise ValueError("Unsupported head type: {}".format(type(head)))
  assert head.logits_dimension == 1, (
      "SDCA only applies to logits_dimension=1.")
  # Update num_loss_partitions based on number of workers.
  n_loss_partitions = num_loss_partitions or max(1, config.num_worker_replicas)
  optimizer = sdca_optimizer.SDCAOptimizer(
      example_id_column=example_id_column,
      num_loss_partitions=n_loss_partitions,
      symmetric_l1_regularization=l1_regularization,
      symmetric_l2_regularization=l2_regularization,
      partitioner=partitioner)
  parent_scope = "linear"
  with variable_scope.variable_scope(
      values=features.values(), name_or_scope=parent_scope,
      partitioner=partitioner) as scope:
    # Copy so the transformed features don't leak back to the caller's dict.
    features = features.copy()
    features.update(layers.transform_features(features, feature_columns))
    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            num_outputs=1,
            scope=scope))
    # The bias is modelled as an extra all-ones column so SDCA can update it
    # together with the other primal weights.
    _add_bias_column(feature_columns, features, bias, columns_to_variables)
  def _train_op_fn(unused_loss):
    # SDCA computes its own loss internally, so the head-provided loss is
    # ignored here; the optimizer builds the train step directly.
    global_step = training_util.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(
        columns_to_variables, weight_column_name, loss_type, features, labels,
        global_step)
    if update_weights_hook is not None:
      update_weights_hook.set_parameters(sdca_model, train_op)
    return train_op
  model_fn_ops = head.create_model_fn_ops(
      features=features,
      labels=labels,
      mode=mode,
      train_op_fn=_train_op_fn,
      logits=logits)
  if update_weights_hook is not None:
    # Run the weight update/shrink hook on the chief so the primal weights
    # are refreshed after each SDCA step.
    return model_fn_ops._replace(training_chief_hooks=(
        model_fn_ops.training_chief_hooks + [update_weights_hook]))
  return model_fn_ops
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
  """Hook that runs the SDCA weight update (and shrinkage) each train step."""

  def __init__(self):
    pass

  def set_parameters(self, sdca_model, train_op):
    """Records the SDCA model and train op used to build the update op."""
    self._sdca_model = sdca_model
    self._train_op = train_op

  def begin(self):
    """Builds the update_weights op.

    The op is implicitly added to the default graph.
    """
    self._update_op = self._sdca_model.update_weights(self._train_op)

  def before_run(self, run_context):
    """Requests that the update_weights op be executed during this run."""
    return session_run_hook.SessionRunArgs(self._update_op)
class _SDCAEstimator(estimator.Estimator):
  """Shared base for linear estimators trained with the SDCA optimizer.

  Not intended for direct use; instantiate one of the concrete subclasses
  instead.
  """

  def __init__(self,
               example_id_column,
               feature_columns,
               weight_column_name=None,
               model_dir=None,
               head=None,
               l1_regularization=0.0,
               l2_regularization=1.0,
               num_loss_partitions=None,
               config=None,
               feature_engineering_fn=None,
               partitioner=None):
    """Construct a `_SDCAEstimator` estimator object.

    Args:
      example_id_column: Name of the feature column holding example ids;
        used to initialize the underlying SDCA optimizer.
      feature_columns: Iterable of `FeatureColumn`-derived objects used by
        the model.
      weight_column_name: Optional name of the feature column holding
        per-example weights; the weight multiplies the example's loss.
      model_dir: Directory for model parameters, graph, checkpoints; also
        used to resume training from a previously saved model.
      head: The model head. Currently _BinaryLogisticHead and _BinarySvmHead
        are supported for classification and _RegressionHead for regression.
      l1_regularization: Global (across all examples) L1 regularization.
      l2_regularization: Global (across all examples) L2 regularization.
      num_loss_partitions: Number of partitions of the global loss function
        optimized by the underlying SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Optional function mapping `input_fn` output
        (features, labels) to the (features, labels) fed into the model.
      partitioner: Variable partitioner for the primal weights (`div`
        partitioning strategy will be used).

    Returns:
      A `_SDCAEstimator` estimator.

    Raises:
      ValueError: if head is not supported by SDCA.
    """
    self._feature_columns = tuple(feature_columns or [])
    assert self._feature_columns

    if not _head_is_valid_for_sdca(head):
      raise ValueError(
          "head type: {} is not supported. Supported head types: "
          "_BinaryLogisticHead, _BinarySvmHead and _RegressionHead.".format(
              type(head)))
    assert head.logits_dimension == 1

    params = dict(
        head=head,
        feature_columns=feature_columns,
        example_id_column=example_id_column,
        num_loss_partitions=num_loss_partitions,
        l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        weight_column_name=weight_column_name,
        update_weights_hook=_SdcaUpdateWeightsHook(),
        partitioner=partitioner)

    super(_SDCAEstimator, self).__init__(
        model_fn=sdca_model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
class SDCALogisticClassifier(_SDCAEstimator):
  """Logistic regression binary classifier using the SDCA optimizer.

  Example usage:

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_x_sparse_feature_b = crossed_column(...)
  classifier = SDCALogisticClassifier(
      example_id_column='example_id',
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b]),
      weight_column_name=...,
      l2_regularization=...,
      num_loss_partitions=...,
  )

  # Input builders
  # returns x, y (where y is the label Tensor (with 0/1 values)
  def input_fn_{train, eval}:

  # returns x (features dict)
  def input_fn_test:
    ...
  classifier.fit(input_fn=input_fn_train)
  classifier.evaluate(input_fn=input_fn_eval)
  # Returns predicted classes.
  classifier.predict_classes(input_fn=input_fn_test)
  # Returns predicted probabilities.
  classifier.predict_proba(input_fn=input_fn_test)
  ```

  The input_fn provided to `fit`, `evaluate` and predict_* methods should return
  the following features, otherwise there will be a `KeyError`:
  * A feature with `key=example_id_column` whose value is a `Tensor` of dtype
    string.
  * If `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * For each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name` whose
      `value` is a `SparseTensor`
    - if `column` is a `RealValuedColumn, a feature with `key=column.name`
      whose `value` is a `Tensor`
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`
  """

  def __init__(self,
               example_id_column,
               feature_columns,
               weight_column_name=None,
               model_dir=None,
               l1_regularization=0.0,
               l2_regularization=1.0,
               num_loss_partitions=None,
               config=None,
               feature_engineering_fn=None,
               partitioner=None):
    """Construct a `SDCALogisticClassifier` object.

    Args:
      example_id_column: A string defining the feature column name representing
        example ids. Used to initialize the underlying SDCA optimizer.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the iterable should derive from `FeatureColumn`.
        Note that the order of the items is ignored at model construction time.
      weight_column_name: A string defining feature column name representing
        weights. It is used to downweight or boost examples during training. It
        will be multiplied by the loss of the example.
      model_dir: Directory to save model parameters, graph etc. This can also be
        used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      l1_regularization: L1-regularization parameter. Refers to global L1
        regularization (across all examples).
      l2_regularization: L2-regularization parameter. Refers to global L2
        regularization (across all examples).
      num_loss_partitions: Number of partitions of the global loss function
        optimized by the underlying optimizer (SDCAOptimizer).
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      partitioner: Variable partitioner for the primal weights (`div`
        partitioning strategy will be used).

    Returns:
      A `SDCALogisiticClassifier` estimator.
    """
    super(SDCALogisticClassifier, self).__init__(
        example_id_column=example_id_column,
        feature_columns=feature_columns,
        weight_column_name=weight_column_name,
        model_dir=model_dir,
        head=head_lib.multi_class_head(
            n_classes=2, weight_column_name=weight_column_name),
        l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        num_loss_partitions=num_loss_partitions,
        config=config,
        # Fix: forward the caller-supplied feature_engineering_fn instead of
        # silently discarding it by passing None.
        feature_engineering_fn=feature_engineering_fn,
        partitioner=partitioner)

  def predict_classes(self, input_fn=None):
    """Runs inference to determine the predicted class.

    Args:
      input_fn: The input function providing features.

    Returns:
      A generator of predicted classes for the features provided by input_fn.
    """
    key = prediction_key.PredictionKey.CLASSES
    predictions = super(SDCALogisticClassifier, self).predict(
        input_fn=input_fn, outputs=[key])
    return (pred[key] for pred in predictions)

  def predict_proba(self, input_fn=None):
    """Runs inference to determine the class probability predictions.

    Args:
      input_fn: The input function providing features.

    Returns:
      A generator of predicted class probabilities for the features provided by
        input_fn.
    """
    key = prediction_key.PredictionKey.PROBABILITIES
    predictions = super(SDCALogisticClassifier, self).predict(
        input_fn=input_fn, outputs=[key])
    return (pred[key] for pred in predictions)
class SDCALinearRegressor(_SDCAEstimator):
  """Linear regression model using SDCA to solve the underlying optimization.

  Example usage:

  ```python
  real_column_a = real_valued_column(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  regressor = SDCALinearRegressor(
      example_id_column='example_id',
      feature_columns=[real_column_a, sparse_column_b]),
      weight_column_name=...,
      l2_regularization=...,
      num_loss_partitions=...,
  )

  # Input builders
  # returns x, y (where y is the label Tensor (with 0/1 values)
  def input_fn_{train, eval}:

  # returns x (features dict)
  def input_fn_test:
    ...
  regressor.fit(input_fn=input_fn_train)
  regressor.evaluate(input_fn=input_fn_eval)
  regressor.predict_scores(input_fn=input_fn_test)  # returns predicted scores.
  ```

  The input_fn provided to `fit`, `evaluate` and predict_* methods should return
  the following features, otherwise there will be a `KeyError`:
  * A feature with `key=example_id_column` whose value is a `Tensor` of dtype
    string.
  * If `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * For each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name` whose
      `value` is a `SparseTensor`
    - if `column` is a `RealValuedColumn, a feature with `key=column.name`
      whose `value` is a `Tensor`
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`
  """

  def __init__(self,
               example_id_column,
               feature_columns,
               weight_column_name=None,
               model_dir=None,
               l1_regularization=0.0,
               l2_regularization=1.0,
               num_loss_partitions=None,
               config=None,
               feature_engineering_fn=None,
               partitioner=None):
    """Construct a `SDCALinearRegressor` estimator object.

    Args:
      example_id_column: A string defining the feature column name representing
        example ids. Used to initialize the underlying SDCA optimizer.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the iterable should derive from `FeatureColumn`.
        Note that the order of the items is ignored at model construction time.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      model_dir: Directory to save model parameters, graph etc. This can also be
        used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      l1_regularization: L1-regularization parameter. Refers to global L1
        regularization (across all examples).
      l2_regularization: L2-regularization parameter. Refers to global L2
        regularization (across all examples).
      num_loss_partitions: number of partitions of the (global) loss function
        optimized by the underlying optimizer (SDCAOptimizer).
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      partitioner: Variable partitioner for the primal weights (`div`
        partitioning strategy will be used).

    Returns:
      A `SDCALinearRegressor` estimator.
    """
    super(SDCALinearRegressor, self).__init__(
        example_id_column=example_id_column,
        feature_columns=feature_columns,
        weight_column_name=weight_column_name,
        model_dir=model_dir,
        head=head_lib.regression_head(weight_column_name=weight_column_name),
        l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        num_loss_partitions=num_loss_partitions,
        config=config,
        # Fix: forward the caller-supplied feature_engineering_fn instead of
        # silently discarding it by passing None.
        feature_engineering_fn=feature_engineering_fn,
        partitioner=partitioner)

  def predict_scores(self, input_fn):
    """Returns predicted scores for given features.

    Args:
      input_fn: The input function providing features.

    Returns:
      A generator of predicted scores for the features provided by input_fn.
    """
    key = prediction_key.PredictionKey.SCORES
    predictions = super(SDCALinearRegressor, self).predict(
        input_fn=input_fn, outputs=[key])
    return (pred[key] for pred in predictions)
| apache-2.0 |
leki75/ansible | lib/ansible/plugins/connection/winrm.py | 14 | 25420 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import inspect
import os
import re
import shlex
import socket
import traceback
import json
import tempfile
import subprocess
import itertools
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlunsplit
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import exec_wrapper, become_wrapper, leaf_exec
from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe
try:
import winrm
from winrm import Response
from winrm.protocol import Protocol
HAS_WINRM = True
except ImportError as e:
HAS_WINRM = False
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError as e:
HAS_XMLTODICT = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
become_methods = ['runas']
allow_executable = False
def __init__(self, *args, **kwargs):
self.has_pipelining = True
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
# FUTURE: Add runas support
super(Connection, self).__init__(*args, **kwargs)
def transport_test(self, connect_timeout):
''' Test the transport mechanism, if available '''
host = self._winrm_host
port = int(self._winrm_port)
display.vvv("attempting transport test to %s:%s" % (host, port))
sock = socket.create_connection((host, port), connect_timeout)
sock.close()
    def set_host_overrides(self, host, hostvars=None):
        '''
        Override WinRM-specific options from host variables.

        Reads connection settings from the play context plus any
        ansible_winrm_* host variables, validates them against what the
        installed pywinrm supports, and caches them on self for later use
        by _winrm_connect().
        '''
        if not HAS_WINRM:
            return
        # Basic endpoint/credential settings from the play context.
        self._winrm_host = self._play_context.remote_addr
        self._winrm_port = int(self._play_context.port or 5986)
        self._winrm_scheme = hostvars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
        self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman')
        self._winrm_user = self._play_context.remote_user
        self._winrm_pass = self._play_context.password
        self._become_method = self._play_context.become_method
        self._become_user = self._play_context.become_user
        self._become_pass = self._play_context.become_pass
        self._kinit_cmd = hostvars.get('ansible_winrm_kinit_cmd', 'kinit')
        # Ask pywinrm which auth types it supports, if it is new enough to say.
        if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
            self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
        else:
            # for legacy versions of pywinrm, use the values we know are supported
            self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
        # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
        # Default transport: prefer kerberos when the user looks like a UPN
        # (user@REALM) and the kerberos library is available.
        transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'
        if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
            self._winrm_transport = 'kerberos,%s' % transport_selector
        else:
            self._winrm_transport = transport_selector
        self._winrm_transport = hostvars.get('ansible_winrm_transport', self._winrm_transport)
        # Normalize to a list: a comma-separated string becomes an ordered list.
        if isinstance(self._winrm_transport, string_types):
            self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
        unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
        if unsupported_transports:
            raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports))
        # if kerberos is among our transports and there's a password specified, we're managing the tickets
        kinit_mode = to_text(hostvars.get('ansible_winrm_kinit_mode', '')).strip()
        if kinit_mode == "":
            # HACK: ideally, remove multi-transport stuff
            self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass
        elif kinit_mode == "managed":
            self._kerb_managed = True
        elif kinit_mode == "manual":
            self._kerb_managed = False
        else:
            raise AnsibleError('Unknown ansible_winrm_kinit_mode value: "%s" (must be "managed" or "manual")' % kinit_mode)
        # arg names we're going passing directly
        internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])
        self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
        # Determine which keyword args pywinrm's Protocol.__init__ accepts so we
        # can forward matching ansible_winrm_* host vars straight through.
        argspec = inspect.getargspec(Protocol.__init__)
        supported_winrm_args = set(argspec.args)
        supported_winrm_args.update(internal_kwarg_mask)
        passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in hostvars if v.startswith('ansible_winrm_')])
        unsupported_args = passed_winrm_args.difference(supported_winrm_args)
        # warn for kwargs unsupported by the installed version of pywinrm
        for arg in unsupported_args:
            display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
        # pass through matching kwargs, excluding the list we want to treat specially
        for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
            self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg]
# Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection
# auth itself with a private CCACHE.
def _kerb_auth(self, principal, password):
if password is None:
password = ""
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
krbenv = dict(KRB5CCNAME=krb5ccname)
os.environ["KRB5CCNAME"] = krb5ccname
kinit_cmdline = [self._kinit_cmd, principal]
display.vvvvv("calling kinit for principal %s" % principal)
p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=krbenv)
# TODO: unicode/py3
stdout, stderr = p.communicate(password + b'\n')
if p.returncode != 0:
raise AnsibleConnectionFailure("Kerberos auth failure: %s" % stderr.strip())
display.vvvvv("kinit succeeded for principal %s" % principal)
    def _winrm_connect(self):
        '''
        Establish a WinRM connection over HTTP/HTTPS.

        Tries each configured transport in order; the first one that can open
        a remote shell wins. Collected per-transport errors are raised
        together if none succeed.
        '''
        display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
                    (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
        netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
        endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
        errors = []
        for transport in self._winrm_transport:
            if transport == 'kerberos':
                if not HAVE_KERBEROS:
                    errors.append('kerberos: the python kerberos library is not installed')
                    continue
                if self._kerb_managed:
                    # Acquire a TGT into a private ccache before connecting.
                    self._kerb_auth(self._winrm_user, self._winrm_pass)
            display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
            try:
                protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
                # open the shell from connect so we know we're able to talk to the server
                if not self.shell_id:
                    self.shell_id = protocol.open_shell(codepage=65001)  # UTF-8
                    display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
                return protocol
            except Exception as e:
                err_msg = to_text(e).strip()
                if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
                    raise AnsibleError('the connection attempt timed out')
                # Try to extract an HTTP status code from the error text for
                # friendlier messages.
                m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
                if m:
                    code = int(m.groups()[0])
                    if code == 401:
                        err_msg = 'the specified credentials were rejected by the server'
                    elif code == 411:
                        # NOTE(review): an HTTP 411 (Length Required) is
                        # treated as proof the endpoint is usable, so the
                        # protocol object is returned anyway — confirm this
                        # matches current pywinrm behavior.
                        return protocol
                errors.append(u'%s: %s' % (transport, err_msg))
                display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
        if errors:
            raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
        else:
            raise AnsibleError('No transport found for WinRM connection')
def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
# TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that
# comes from this)
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except Exception as ex:
from traceback import format_exc
display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc(ex))
stdin_push_failed = True
if stdin_push_failed:
raise AnsibleError('winrm send_input failed')
# NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
# FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
# TODO: check result from response and set stdin_push_failed if we have nonzero
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
if stdin_push_failed:
raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % to_text(e))
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % to_text(e))
super(Connection, self)._connect()
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
return self
def _reset(self): # used by win_reboot (and any other action that might need to bounce the state)
self.protocol = None
self.shell_id = None
self._connect()
def _create_raw_wrapper_payload(self, cmd, environment=dict()):
payload = {
'module_entry': to_text(base64.b64encode(to_bytes(cmd))),
'powershell_modules': {},
'actions': ['exec'],
'exec': to_text(base64.b64encode(to_bytes(leaf_exec))),
'environment': environment
}
return json.dumps(payload)
def _wrapper_payload_stream(self, payload, buffer_size=200000):
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
    def exec_command_old(self, cmd, in_data=None, sudoable=True):
        """Legacy exec path, superseded by exec_command().

        NOTE(review): shlex.split() on a bytes object fails on Python 3, and
        map() returns an iterator there (breaking the cmd_parts[0] indexing
        below) -- this path appears Python-2 only; confirm before reuse.
        """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        cmd_parts = shlex.split(to_bytes(cmd), posix=False)
        cmd_parts = map(to_text, cmd_parts)
        script = None
        # last four characters of the first (unquoted) token, e.g. '.ps1'
        cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
        # Support running .ps1 files (via script/raw).
        if cmd_ext == '.ps1':
            script = '& %s' % cmd
        # Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
        elif cmd_ext in ('.bat', '.cmd'):
            script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
        # Encode the command if not already encoded; supports running simple PowerShell commands via raw.
        elif '-EncodedCommand' not in cmd_parts:
            script = cmd
        if script:
            cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
        if '-EncodedCommand' in cmd_parts:
            # decode only for logging, so the operator can read the real command
            encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
            decoded_cmd = to_text(base64.b64decode(encoded_cmd).decode('utf-16-le'))
            display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
        else:
            display.vvv("EXEC %s" % cmd, host=self._winrm_host)
        try:
            result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
        except Exception:
            traceback.print_exc()
            raise AnsibleConnectionFailure("failed to exec cmd %s" % cmd)
        result.std_out = to_bytes(result.std_out)
        result.std_err = to_bytes(result.std_err)
        # parse just stderr from CLIXML output
        if self.is_clixml(result.std_err):
            try:
                result.std_err = self.parse_clixml_stream(result.std_err)
            except:
                # unsure if we're guaranteed a valid xml doc- use raw output in case of error
                pass
        return (result.status_code, result.std_out, result.std_err)
def is_clixml(self, value):
return value.startswith(b"#< CLIXML")
    # hacky way to get just stdout- not always sure of doc framing here, so use with care
    def parse_clixml_stream(self, clixml_doc, stream_name='Error'):
        """Extract the text of one CLIXML stream (default 'Error') as a string.

        NOTE(review): assumes doc['Objs']['S'] is a list of dicts; xmltodict
        returns a single dict (not a list) when only one <S> element exists,
        which would make this iterate its keys -- confirm callers never hit
        that case.
        """
        # strip the CLIXML preamble so xmltodict sees a plain XML document
        clear_xml = clixml_doc.replace(b'#< CLIXML\r\n', b'')
        doc = xmltodict.parse(clear_xml)
        # keep only <S> elements tagged with the requested stream name;
        # CLIXML encodes CRLF as the literal token _x000D__x000A_
        lines = [l.get('#text', '').replace('_x000D__x000A_', '') for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name]
        return '\r\n'.join(lines)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), ''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + '\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield "", True
    def put_file(self, in_path, out_path):
        """Stream a local file to the remote host via a PowerShell pipeline.

        The content is base64-encoded chunk by chunk and fed to a remote
        script that decodes, writes, and SHA1-hashes it; the remote hash is
        then verified against the local file.

        :raises AnsibleFileNotFound: if in_path does not exist locally
        :raises AnsibleError: on remote failure or hash mismatch
        """
        super(Connection, self).put_file(in_path, out_path)
        out_path = self._shell._unquote(out_path)
        display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
        # Remote pipeline: begin/process/end blocks consume base64 lines from
        # stdin, write the decoded bytes, and emit a JSON doc with the sha1.
        script_template = u'''
            begin {{
                $path = '{0}'
                $DebugPreference = "Continue"
                $ErrorActionPreference = "Stop"
                Set-StrictMode -Version 2
                $fd = [System.IO.File]::Create($path)
                $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
                $bytes = @() #initialize for empty file case
            }}
            process {{
               $bytes = [System.Convert]::FromBase64String($input)
               $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
               $fd.Write($bytes, 0, $bytes.Length)
            }}
            end {{
                $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
                $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
                $fd.Close()
                Write-Output "{{""sha1"":""$hash""}}"
            }}
        '''
        script = script_template.format(self._shell._escape(out_path))
        cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
        result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
        # TODO: improve error handling
        if result.status_code != 0:
            raise AnsibleError(to_native(result.std_err))
        put_output = json.loads(result.std_out)
        remote_sha1 = put_output.get("sha1")
        if not remote_sha1:
            raise AnsibleError("Remote sha1 was not returned")
        local_sha1 = secure_hash(in_path)
        if not remote_sha1 == local_sha1:
            raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
    def fetch_file(self, in_path, out_path):
        """Fetch a remote file (or mark a remote directory) to out_path.

        Reads the remote file in fixed-size chunks via repeated PowerShell
        invocations, base64-decoding each chunk locally.  A remote directory
        is mirrored by creating the local directory (no recursion here).

        :raises AnsibleError: when the remote transfer fails
        """
        super(Connection, self).fetch_file(in_path, out_path)
        in_path = self._shell._unquote(in_path)
        out_path = out_path.replace('\\', '/')
        display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
        buffer_size = 2**19  # 0.5MB chunks
        makedirs_safe(os.path.dirname(out_path))
        out_file = None
        try:
            offset = 0
            while True:
                try:
                    # one chunk per remote invocation, seeking to `offset`;
                    # prints "[DIR]" for a directory, errors if path is absent
                    script = '''
                        If (Test-Path -PathType Leaf "%(path)s")
                        {
                            $stream = New-Object IO.FileStream("%(path)s", [System.IO.FileMode]::Open, [System.IO.FileAccess]::Read, [IO.FileShare]::ReadWrite);
                            $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
                            $buffer = New-Object Byte[] %(buffer_size)d;
                            $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
                            $bytes = $buffer[0..($bytesRead-1)];
                            [System.Convert]::ToBase64String($bytes);
                            $stream.Close() | Out-Null;
                        }
                        ElseIf (Test-Path -PathType Container "%(path)s")
                        {
                            Write-Host "[DIR]";
                        }
                        Else
                        {
                            Write-Error "%(path)s does not exist";
                            Exit 1;
                        }
                    ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
                    display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
                    cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
                    result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
                    if result.status_code != 0:
                        raise IOError(to_native(result.std_err))
                    if result.std_out.strip() == '[DIR]':
                        data = None
                    else:
                        data = base64.b64decode(result.std_out.strip())
                    if data is None:
                        # remote path is a directory -- create it locally and stop
                        makedirs_safe(out_path)
                        break
                    else:
                        if not out_file:
                            # If out_path is a directory and we're expecting a file, bail out now.
                            if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
                                break
                            out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
                        out_file.write(data)
                        # a short read means we've reached end of file
                        if len(data) < buffer_size:
                            break
                        offset += len(data)
                except Exception:
                    traceback.print_exc()
                    raise AnsibleError('failed to transfer file to "%s"' % out_path)
        finally:
            if out_file:
                out_file.close()
def close(self):
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
| gpl-3.0 |
kjb085/cmdvninja-sublime | requests/packages/charade/__init__.py | 122 | 1327 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "1.0.3"
from sys import version_info
def detect(aBuf):
    """Detect the character encoding of *aBuf* and return the result dict."""
    # Reject text objects: `unicode` on Python 2, anything non-bytes on 3.
    if version_info < (3, 0):
        is_text = isinstance(aBuf, unicode)
    else:
        is_text = not isinstance(aBuf, bytes)
    if is_text:
        raise ValueError('Expected a bytes object, not a unicode object')

    from . import universaldetector
    detector = universaldetector.UniversalDetector()
    detector.reset()
    detector.feed(aBuf)
    detector.close()
    return detector.result
| mit |
NINAnor/QGIS | python/plugins/processing/algs/qgis/ExportGeometryInfo.py | 1 | 4986 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExportGeometryInfo.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QGis, QgsProject, QgsCoordinateTransform, QgsFeature, QgsGeometry, QgsField
from PyQt4.QtCore import QVariant
from qgis.utils import iface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class ExportGeometryInfo(GeoAlgorithm):
    """Processing algorithm that appends geometry measurements as attributes.

    Polygons gain area/perimeter columns, lines gain a length column, and
    points gain x/y coordinate columns.  Measurements can be taken in the
    layer CRS, the project CRS, or ellipsoidally.
    """

    # Parameter/output identifiers used by the Processing framework
    INPUT = 'INPUT'
    METHOD = 'CALC_METHOD'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Export/Add geometry columns')
        self.group, self.i18n_group = self.trAlgorithm('Vector table tools')

        # index into this list is the METHOD selection value (0/1/2)
        self.calc_methods = [self.tr('Layer CRS'),
                             self.tr('Project CRS'),
                             self.tr('Ellipsoidal')]

        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterSelection(self.METHOD,
                                             self.tr('Calculate using'), self.calc_methods, 0))

        self.addOutput(OutputVector(self.OUTPUT, self.tr('Added geom info')))

    def processAlgorithm(self, progress):
        """Copy features to the output, appending geometry measurements."""
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT))
        method = self.getParameterValue(self.METHOD)

        geometryType = layer.geometryType()
        fields = layer.pendingFields()

        # Add measurement columns appropriate to the geometry type, taking
        # care not to clash with existing field names.
        if geometryType == QGis.Polygon:
            areaName = vector.createUniqueFieldName('area', fields)
            fields.append(QgsField(areaName, QVariant.Double))
            perimeterName = vector.createUniqueFieldName('perimeter', fields)
            fields.append(QgsField(perimeterName, QVariant.Double))
        elif geometryType == QGis.Line:
            lengthName = vector.createUniqueFieldName('length', fields)
            fields.append(QgsField(lengthName, QVariant.Double))
        else:
            xName = vector.createUniqueFieldName('xcoord', fields)
            fields.append(QgsField(xName, QVariant.Double))
            yName = vector.createUniqueFieldName('ycoord', fields)
            fields.append(QgsField(yName, QVariant.Double))

        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
            fields.toList(), layer.dataProvider().geometryType(), layer.crs())

        ellips = None
        crs = None
        coordTransform = None

        # Calculate with:
        # 0 - layer CRS
        # 1 - project CRS
        # 2 - ellipsoidal
        if method == 2:
            ellips = QgsProject.instance().readEntry('Measure', '/Ellipsoid',
                                                     'NONE')[0]
            crs = layer.crs().srsid()
        elif method == 1:
            mapCRS = iface.mapCanvas().mapSettings().destinationCrs()
            layCRS = layer.crs()
            coordTransform = QgsCoordinateTransform(layCRS, mapCRS)

        outFeat = QgsFeature()
        inGeom = QgsGeometry()

        outFeat.initAttributes(len(fields))
        outFeat.setFields(fields)

        current = 0
        features = vector.features(layer)
        total = 100.0 / float(len(features))
        for f in features:
            inGeom = f.geometry()

            # project-CRS measurement requires reprojecting each geometry
            if method == 1:
                inGeom.transform(coordTransform)

            (attr1, attr2) = vector.simpleMeasure(inGeom, method, ellips, crs)

            outFeat.setGeometry(inGeom)
            attrs = f.attributes()
            attrs.append(attr1)
            # attr2 is None for single-measure geometry types (line/point y-less)
            if attr2 is not None:
                attrs.append(attr2)
            outFeat.setAttributes(attrs)
            writer.addFeature(outFeat)

            current += 1
            progress.setPercentage(int(current * total))

        del writer
| gpl-2.0 |
nitin-cherian/LifeLongLearning | Web_Development_Python/RealPython/real-python-test/env/lib/python3.5/site-packages/setuptools/dist.py | 79 | 35704 | __all__ = ['Distribution']
import re
import os
import sys
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources.extern import packaging
from setuptools.depends import Require
from setuptools import windows_support
import pkg_resources
def _get_unpatched(cls):
    """Protect against re-patching the distutils if reloaded

    Also ensures that no other distutils extension monkeypatched the distutils
    first.
    """
    # Walk up the single-inheritance chain past any setuptools subclasses...
    current = cls
    while current.__module__.startswith('setuptools'):
        (current,) = current.__bases__
    # ...and insist the class we land on really comes from distutils.
    if not current.__module__.startswith('distutils'):
        raise AssertionError(
            "distutils has already been patched by %r" % current
        )
    return current
# Rebind to the pristine distutils Distribution so the subclass below always
# extends the original, even if another extension patched it first.
_Distribution = _get_unpatched(_Distribution)
def _patch_distribution_metadata_write_pkg_info():
    """
    Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
    encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
    correct this undesirable behavior.
    """
    # Only the affected interpreter range needs the patch.
    environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
    if not environment_local:
        return

    # from Python 3.4
    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree.
        """
        # explicit UTF-8 instead of the platform's preferred encoding
        with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
                  encoding='UTF-8') as pkg_info:
            self.write_pkg_file(pkg_info)

    distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info
# Apply the PKG-INFO encoding monkey-patch at import time.
_patch_distribution_metadata_write_pkg_info()

# Types accepted wherever a "sequence" of values is allowed below.
sequence = tuple, list
def check_importable(dist, attr, value):
    """Verify that *value* parses as an importable 'module:attrs' string."""
    ok = False
    try:
        entry_point = pkg_resources.EntryPoint.parse('x=' + value)
        # extras are not allowed in this context
        ok = not entry_point.extras
    except (TypeError, ValueError, AttributeError):
        ok = False
    if not ok:
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
def assert_string_list(dist, attr, value):
    """Verify that value is a string list or None"""
    ok = True
    try:
        # joining a list of strings produces a new object that is != the
        # list itself; a bare string would join to an equal string and fail
        ok = ''.join(value) != value
    except (TypeError, ValueError, AttributeError):
        ok = False
    if not ok:
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    assert_string_list(dist, attr, value)
    for nsp in value:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        # every dotted namespace must also declare its parent namespace
        parent, sep, _child = nsp.rpartition('.')
        if sep and parent not in value:
            distutils.log.warn(
                "WARNING: %r is declared as a package namespace, but %r"
                " is not: please correct this in setup.py", nsp, parent
            )
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for name, reqs in value.items():
            if ':' in name:
                # "extra:marker" form -- validate the environment marker part
                name, marker = name.split(':', 1)
                if pkg_resources.invalid_marker(marker):
                    raise DistutilsSetupError("Invalid environment marker: " + marker)
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    # bool(value) == value holds exactly for True/False/0/1
    if bool(value) == value:
        return
    tmpl = "{attr!r} must be a boolean value (got {value!r})"
    raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # force full evaluation; parse_requirements is lazy
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError) as error:
        msg = (
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; {error}"
        ).format(attr=attr, error=error)
        raise DistutilsSetupError(msg)
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as err:
        # surface the parse failure as a setup error for the user
        raise DistutilsSetupError(err)
def check_test_suite(dist, attr, value):
    """Verify that the test_suite setting is a (dotted-name) string."""
    if isinstance(value, six.string_types):
        return
    raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    def _entry_ok(name, patterns):
        # keys must be str; values need only be iterable
        if not isinstance(name, str):
            return False
        try:
            iter(patterns)
        except TypeError:
            return False
        return True

    if isinstance(value, dict) and all(_entry_ok(k, v) for k, v in value.items()):
        return
    raise DistutilsSetupError(
        attr + " must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
def check_packages(dist, attr, value):
    """Warn (don't fail) about package names that aren't dotted identifiers."""
    for pkgname in value:
        if re.match(r'\w+(\.\w+)*', pkgname):
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only "
            ".-separated package names in setup.py", pkgname
        )
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
    def patch_missing_pkg_info(self, attrs):
        """Backfill the version on an activated dist that lacks PKG-INFO.

        Fake up a replacement for the data that would normally come from
        PKG-INFO, but which might not yet be built if this is a fresh
        checkout.
        """
        if not attrs or 'name' not in attrs or 'version' not in attrs:
            return
        key = pkg_resources.safe_name(str(attrs['name'])).lower()
        dist = pkg_resources.working_set.by_key.get(key)
        if dist is not None and not dist.has_metadata('PKG-INFO'):
            dist._version = pkg_resources.safe_version(str(attrs['version']))
            self._patched_dist = dist
    def __init__(self, attrs=None):
        """Initialize, handling setuptools-only attrs before distutils sees them.

        Order matters: setup_requires eggs may need to be fetched before the
        entry-point-declared keywords (validated later in finalize_options)
        can even be stored on the instance.
        """
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        _attrs_dict = attrs or {}
        if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        self.src_root = attrs and attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        # Make sure we have any eggs needed to interpret 'attrs'
        if attrs is not None:
            self.dependency_links = attrs.pop('dependency_links', [])
            assert_string_list(self,'dependency_links',self.dependency_links)
        if attrs and 'setup_requires' in attrs:
            self.fetch_build_eggs(attrs['setup_requires'])
        # pre-seat every plugin-declared setup keyword so getattr never fails
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            vars(self).setdefault(ep.name, None)
        _Distribution.__init__(self,attrs)
        if isinstance(self.metadata.version, numbers.Number):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)
        if self.metadata.version is not None:
            # normalize to PEP 440 form, warning when it changes the string
            try:
                ver = packaging.version.Version(self.metadata.version)
                normalized_version = str(ver)
                if self.metadata.version != normalized_version:
                    warnings.warn(
                        "Normalizing '%s' to '%s'" % (
                            self.metadata.version,
                            normalized_version,
                        )
                    )
                    self.metadata.version = normalized_version
            except (packaging.version.InvalidVersion, TypeError):
                warnings.warn(
                    "The version specified (%r) is an invalid version, this "
                    "may not work as expected with newer versions of "
                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
                    "details." % self.metadata.version
                )
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
    def fetch_build_eggs(self, requires):
        """Resolve pre-setup requirements"""
        # Resolve against the active working set, downloading eggs via
        # fetch_build_egg as needed, and allow replacing distributions that
        # conflict with already-activated ones.
        resolved_dists = pkg_resources.working_set.resolve(
            pkg_resources.parse_requirements(requires),
            installer=self.fetch_build_egg,
            replace_conflicting=True,
        )
        for dist in resolved_dists:
            pkg_resources.working_set.add(dist, replace=True)
    def finalize_options(self):
        """Run distutils finalization, then setuptools-specific hooks.

        Installs feature-driven command-line options, then lets every
        'distutils.setup_keywords' entry point validate the keyword value it
        owns (fetching the plugin's own egg first if needed).
        """
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()

        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self,ep.name,None)
            if value is not None:
                ep.require(installer=self.fetch_build_egg)
                # the entry point is a validator: fn(dist, attr_name, value)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
        else:
            self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        # Reuse one lazily-built easy_install command for all fetches.
        try:
            cmd = self._egg_fetcher
            cmd.package_index.to_scan = []
        except AttributeError:
            from setuptools.command.easy_install import easy_install
            # a throwaway Distribution just to read the user's easy_install
            # configuration from setup.cfg / distutils config files
            dist = self.__class__({'script_args':['easy_install']})
            dist.parse_config_files()
            opts = dist.get_option_dict('easy_install')
            keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'site_dirs', 'allow_hosts'
            )
            for key in list(opts):
                if key not in keep:
                    del opts[key]   # don't use any other settings
            if self.dependency_links:
                links = self.dependency_links[:]
                if 'find_links' in opts:
                    links = opts['find_links'][1].split() + links
                opts['find_links'] = ('setup', links)
            install_dir = self.get_egg_cache_dir()
            cmd = easy_install(
                dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
                always_copy=False, build_directory=None, editable=False,
                upgrade=False, multi_version=True, no_report=True, user=False
            )
            cmd.ensure_finalized()
            self._egg_fetcher = cmd
        return cmd.easy_install(req)
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""
        go = []
        no = self.negative_opt.copy()

        for name,feature in self.features.items():
            # start every feature's inclusion status as "unknown"
            self._set_feature(name,None)
            feature.validate(self)

            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef=''
                # mark whichever side is the default in its help text
                if not feature.include_by_default():
                    excdef, incdef = incdef, excdef

                go.append(('with-'+name, None, 'include '+descr+incdef))
                go.append(('without-'+name, None, 'exclude '+descr+excdef))
                # --without-X is the negative form of --with-X
                no['without-'+name] = 'with-'+name

        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them"""
        # First, flag all the enabled items (and thus their dependencies)
        for name,feature in self.features.items():
            enabled = self.feature_is_included(name)
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name,1)

        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name,feature in self.features.items():
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name,0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands',command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src,alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias,True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class,'command_consumes_arguments',None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options
        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores. If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.
        Note that options provided by config files are intentionally excluded.
        """
        d = {}
        for cmd,opts in self.command_options.items():
            for opt,(src,val) in opts.items():
                # Skip anything that did not come from the command line
                # (e.g. setup.cfg entries record a file name as `src`).
                if src != "command line":
                    continue
                opt = opt.replace('_','-')
                if val==0:
                    # A zero value means a negative option was used; map it
                    # back to its '--no-X'/'--without-X' spelling, checking
                    # both global and per-command negative-option tables.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj,'negative_opt',{}))
                    for neg,pos in neg_opt.items():
                        if pos==opt:
                            opt=neg
                            val=None
                            break
                    else:
                        # val==0 can only have been produced by a negative
                        # option, so a missing mapping is a logic error.
                        raise AssertionError("Shouldn't be able to get here")
                elif val==1:
                    # Flag option: represented as present-with-no-argument.
                    val = None
                d.setdefault(cmd,{})[opt] = val
        return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.
        """
        import sys
        if six.PY2 or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)
        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)
        # Don't wrap stdout if utf-8 is already the encoding. Provides
        # workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)
        # Print metadata in UTF-8 no matter the platform
        # Save the current stream settings so they can be restored below.
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        # '\n' on POSIX; None (platform default) on Windows.
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering
        # detach() hands over the underlying binary buffer so it can be
        # rewrapped with UTF-8 for the duration of the call.
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Restore the original encoding even if printing raised.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
# Monkeypatch: every distutils entry point (dist, core, cmd) now hands out
# this extended Distribution class instead of the vanilla one, so plain
# distutils-based setup scripts pick up setuptools behavior.
for module in distutils.dist, distutils.core, distutils.cmd:
    module.Distribution = Distribution
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

    'description' -- a short, human readable description of the feature, to
    be used in error messages, and option help messages.

    'standard' -- if true, the feature is included by default if it is
    available on the current system. Otherwise, the feature is only
    included if requested via a command line '--with-X' option, or if
    another included feature requires it. The default setting is 'False'.

    'available' -- if true, the feature is available for installation on the
    current system. The default setting is 'True'.

    'optional' -- if true, the feature's inclusion can be controlled from the
    command line, using the '--with-X' or '--without-X' options. If
    false, the feature's inclusion status is determined automatically,
    based on 'available', 'standard', and whether any other feature
    requires it. The default setting is 'True'.

    'require_features' -- a string or sequence of strings naming features
    that should also be included if this feature is included. Defaults to
    empty list. May also contain 'Require' objects that should be
    added/removed from the distribution.

    'remove' -- a string or list of strings naming packages to be removed
    from the distribution if this feature is *not* included. If the
    feature *is* included, this argument is ignored. This argument exists
    to support removing features that "crosscut" a distribution, such as
    defining a 'tests' feature that removes all the 'tests' subpackages
    provided by other features. The default for this argument is an empty
    list. (Note: the named package(s) or modules must exist in the base
    distribution when the 'setup()' function is initially called.)

    other keywords -- any other keyword arguments are saved, and passed to
    the distribution's 'include()' and 'exclude()' methods when the
    feature is included or excluded, respectively. So, for example, you
    could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
    added or removed from the distribution as appropriate.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument. Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded. See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        warnings.warn(
            "Features are deprecated and will be removed in a future "
            "version. See https://github.com/pypa/setuptools/issues/65.",
            DeprecationWarning,
            stacklevel=3,
        )

    def __init__(self, description, standard=False, available=True,
                 optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()
        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # A single string or Require is treated as a one-element sequence.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,
        # Plain strings name other features; Require objects are forwarded
        # to include()/exclude() via the saved extras.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er
        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras
        if not remove and not require_features and not extras:
            # BUG FIX: the message previously contained a literal '%s'
            # because the format operand was missing.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or "
                "at least one of 'packages', 'py_modules', etc."
                % self.description
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations on
        the distribution. Note that this method may be called more than once
        per feature, and so should be idempotent.
        """
        if not self.available:
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )
        dist.include(**self.extras)
        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations on
        the distribution. This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """
        dist.exclude(**self.extras)
        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self, dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line. It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called. You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """
        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
| mit |
miloszz/DIRAC | FrameworkSystem/Agent/SystemLoggingDBCleaner.py | 7 | 1633 | # $HeadURL$
__RCSID__ = "$Id$"
""" SystemLoggingDBCleaner erases records whose messageTime column
contains a time older than 'RemoveDate' days, where 'RemoveDate'
is an entry in the Configuration Service section of the agent.
"""
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC import S_OK
from DIRAC.FrameworkSystem.DB.SystemLoggingDB import SystemLoggingDB
from DIRAC.Core.Utilities.Time import dateTime, toString, day
class SystemLoggingDBCleaner(AgentModule):
  """Agent that purges MessageRepository rows whose messageTime is older
  than the configured retention period ('RemoveDate', in days)."""

  def initialize(self):
    """Create the DB accessor and compute the retention period."""
    self.SystemLoggingDB = SystemLoggingDB()
    self.period = int(self.am_getOption("RemoveDate", '30')) * day
    return S_OK()

  def execute(self):
    """ The main agent execution method
    """
    limitDate = toString(dateTime() - self.period)
    # Strip the fractional-seconds suffix from the timestamp string.
    limitDate = limitDate[:limitDate.find('.')]
    commonString = 'FROM MessageRepository WHERE messageTime <'

    countQuery = "SELECT count(*) %s '%s'" % (commonString, limitDate)
    result = self.SystemLoggingDB._query(countQuery)
    if not result['OK']:
      return result

    recordsToErase = result['Value'][0][0]
    if recordsToErase == 0:
      self.log.info('No records to erase')
      return S_OK('No records to erase')

    deleteQuery = "DELETE LOW_PRIORITY %s '%s'" % (commonString, limitDate)
    result = self.SystemLoggingDB._update(deleteQuery)
    if not result['OK']:
      self.log.error('Could not erase the requested records',
                     'those older than %s' % limitDate)
      return result
    self.log.info('%s records have been erased' % recordsToErase)
    return result
| gpl-3.0 |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/alembic/templates/generic/env.py | 71 | 1995 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# Left as None here: 'alembic revision --autogenerate' has no metadata to
# diff against until a project sets this.
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Only a database URL is needed (no Engine and therefore no DBAPI):
    context.execute() emits the generated SQL to the script output instead
    of running it against a live connection.
    """
    db_url = config.get_main_option("sqlalchemy.url")
    context.configure(url=db_url)

    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Builds an Engine from the [sqlalchemy.*] config section, opens a
    connection, and runs the migrations inside a transaction; the
    connection is always closed afterwards.
    """
    ini_section = config.get_section(config.config_ini_section)
    engine = engine_from_config(
        ini_section,
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
    )

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
# Entry point: Alembic selects offline mode when invoked with --sql;
# dispatch to the matching migration runner.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| mit |
abhishek-ch/hue | desktop/core/ext-py/pyopenssl/doc/tools/anno-api.py | 36 | 2186 | #! /usr/bin/env python
"""Add reference count annotations to the Python/C API Reference."""
__version__ = '$Revision: 1.1.1.1 $'
import getopt
import os
import sys
import refcounts
# LaTeX markers opening a C-function description whose return type is an
# object pointer; only these entries get refcount annotations.
PREFIX_1 = r"\begin{cfuncdesc}{PyObject*}{"
PREFIX_2 = r"\begin{cfuncdesc}{PyVarObject*}{"
def main():
    """Annotate \\begin{cfuncdesc} entries in the input with refcount info.

    Reads LaTeX sources (file arguments, or stdin for "-") and, for every
    PyObject*/PyVarObject* cfuncdesc whose function appears in the
    refcounts data file, rewrites the opening line to carry the return
    type and reference ownership. Output goes to stdout or the --output
    file; unknown functions are reported on stderr.

    Fixes vs. the original: files are closed even when an exception is
    raised mid-stream, and the "Always \\NULL{}" literal is now a raw
    string (the bare "\\N" escape is invalid in Python 3 source).
    """
    rcfile = os.path.join(os.path.dirname(refcounts.__file__), os.pardir,
                          "api", "refcounts.dat")
    outfile = "-"
    opts, args = getopt.getopt(sys.argv[1:], "o:r:", ["output=", "refcounts="])
    for opt, arg in opts:
        if opt in ("-o", "--output"):
            outfile = arg
        elif opt in ("-r", "--refcounts"):
            rcfile = arg
    rcdict = refcounts.load(rcfile)

    if outfile == "-":
        output = sys.stdout
    else:
        output = open(outfile, "w")
    try:
        if not args:
            args = ["-"]
        for infile in args:
            if infile == "-":
                input = sys.stdin
            else:
                input = open(infile)
            try:
                for line in input:
                    output.write(_annotate_line(line, rcdict))
            finally:
                if infile != "-":
                    input.close()
    finally:
        if outfile != "-":
            output.close()


def _annotate_line(line, rcdict):
    """Return `line`, annotated if it opens a cfuncdesc for a function
    present in `rcdict`; otherwise return it unchanged."""
    prefix = None
    if line.startswith(PREFIX_1):
        prefix = PREFIX_1
    elif line.startswith(PREFIX_2):
        prefix = PREFIX_2
    if prefix is None:
        return line
    name = line[len(prefix):].split('}', 1)[0]
    try:
        info = rcdict[name]
    except KeyError:
        sys.stderr.write("No refcount data for %s\n" % name)
        return line
    if info.result_type in ("PyObject*", "PyVarObject*"):
        if info.result_refs is None:
            # Raw string keeps runtime bytes identical to the py2 literal.
            rc = r"Always \NULL{}"
        else:
            rc = (info.result_refs and "New" or "Borrowed") + " reference"
        return (r"\begin{cfuncdesc}[%s]{%s}{"
                % (rc, info.result_type)) + line[len(prefix):]
    return line
main()
| apache-2.0 |
illicitonion/buck | third-party/py/unittest2/unittest2/test/test_unittest2_with.py | 111 | 5018 | from __future__ import with_statement
import unittest2
from unittest2.test.support import OldTestResult, catch_warnings
import warnings
# needed to enable the deprecation warnings
warnings.simplefilter('default')
class TestWith(unittest2.TestCase):
    """Tests that use the with statement live in this
    module so that all other tests can be run with Python 2.4.
    """
    # NOTE: this module uses Python-2-only syntax ('except E, e', u''
    # literals) and cannot be imported under Python 3.
    def testAssertRaisesExcValue(self):
        class ExceptionMock(Exception):
            pass
        def Stub(foo):
            raise ExceptionMock(foo)
        v = "particular value"
        # The context manager records the caught exception on .exception.
        ctx = self.assertRaises(ExceptionMock)
        with ctx:
            Stub(v)
        e = ctx.exception
        self.assertIsInstance(e, ExceptionMock)
        self.assertEqual(e.args[0], v)
    def test_assertRaises(self):
        def _raise(e):
            raise e
        self.assertRaises(KeyError, _raise, KeyError)
        self.assertRaises(KeyError, _raise, KeyError("key"))
        try:
            self.assertRaises(KeyError, lambda: None)
        except self.failureException, e:
            self.assertIn("KeyError not raised", e.args)
        else:
            self.fail("assertRaises() didn't fail")
        try:
            self.assertRaises(KeyError, _raise, ValueError)
        except ValueError:
            pass
        else:
            self.fail("assertRaises() didn't let exception pass through")
        with self.assertRaises(KeyError) as cm:
            try:
                raise KeyError
            except Exception, e:
                raise
        # The very exception instance re-raised above must be recorded.
        self.assertIs(cm.exception, e)
        with self.assertRaises(KeyError):
            raise KeyError("key")
        try:
            with self.assertRaises(KeyError):
                pass
        except self.failureException, e:
            self.assertIn("KeyError not raised", e.args)
        else:
            self.fail("assertRaises() didn't fail")
        try:
            with self.assertRaises(KeyError):
                raise ValueError
        except ValueError:
            pass
        else:
            self.fail("assertRaises() didn't let exception pass through")
    def test_assert_dict_unicode_error(self):
        with catch_warnings(record=True):
            # This causes a UnicodeWarning due to its craziness
            one = ''.join(chr(i) for i in range(255))
            # this used to cause a UnicodeDecodeError constructing the failure msg
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'foo': one}, {'foo': u'\uFFFD'})
    def test_formatMessage_unicode_error(self):
        with catch_warnings(record=True):
            # This causes a UnicodeWarning due to its craziness
            one = ''.join(chr(i) for i in range(255))
            # this used to cause a UnicodeDecodeError constructing msg
            self._formatMessage(one, u'\uFFFD')
    def assertOldResultWarning(self, test, failures):
        # Helper: run `test` against a pre-2.7-style TestResult and check
        # that exactly one DeprecationWarning is emitted.
        with catch_warnings(record=True) as log:
            result = OldTestResult()
            test.run(result)
            self.assertEqual(len(result.failures), failures)
            warning, = log
            self.assertIs(warning.category, DeprecationWarning)
    def test_old_testresult(self):
        class Test(unittest2.TestCase):
            def testSkip(self):
                self.skipTest('foobar')
            @unittest2.expectedFailure
            def testExpectedFail(self):
                raise TypeError
            @unittest2.expectedFailure
            def testUnexpectedSuccess(self):
                pass
        for test_name, should_pass in (('testSkip', True),
                                       ('testExpectedFail', True),
                                       ('testUnexpectedSuccess', False)):
            test = Test(test_name)
            self.assertOldResultWarning(test, int(not should_pass))
    def test_old_testresult_setup(self):
        class Test(unittest2.TestCase):
            def setUp(self):
                self.skipTest('no reason')
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def test_old_testresult_class(self):
        class Test(unittest2.TestCase):
            def testFoo(self):
                pass
        Test = unittest2.skip('no reason')(Test)
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testPendingDeprecationMethodNames(self):
        """Test fail* methods pending deprecation, they will warn in 3.2.
        Do not use these methods. They will go away in 3.3.
        """
        with catch_warnings(record=True):
            self.failIfEqual(3, 5)
            self.failUnlessEqual(3, 3)
            self.failUnlessAlmostEqual(2.0, 2.0)
            self.failIfAlmostEqual(3.0, 5.0)
            self.failUnless(True)
            self.failUnlessRaises(TypeError, lambda _: 3.14 + u'spam')
            self.failIf(False)
# Allow running this test module directly via unittest2's CLI.
if __name__ == '__main__':
    unittest2.main()
| apache-2.0 |
beni55/thefuck | tests/rules/test_tsuru_not_command.py | 17 | 2760 | import pytest
from tests.utils import Command
from thefuck.rules.tsuru_not_command import match, get_new_command
# Each stderr below carries a tsuru "Did you mean?" suggestion list, which
# is what the rule keys on, so all of these commands must match.
@pytest.mark.parametrize('command', [
    Command('tsuru log', stderr=(
        'tsuru: "tchururu" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\tapp-log\n'
        '\tlogin\n'
        '\tlogout\n'
    )),
    Command('tsuru app-l', stderr=(
        'tsuru: "tchururu" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\tapp-list\n'
        '\tapp-log\n'
    )),
    Command('tsuru user-list', stderr=(
        'tsuru: "tchururu" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\tteam-user-list\n'
    )),
    Command('tsuru targetlist', stderr=(
        'tsuru: "tchururu" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\ttarget-list\n'
    )),
])
def test_match(command):
    assert match(command)
# None of these stderr payloads contains a usable suggestion entry (either
# no "Did you mean?" section at all, or an empty one), so the rule must
# stay silent.
@pytest.mark.parametrize('command', [
    Command('tsuru tchururu', stderr=(
        'tsuru: "tchururu" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
    )),
    Command('tsuru version', stderr='tsuru version 0.16.0.'),
    Command('tsuru help', stderr=(
        'tsuru version 0.16.0.\n'
        '\nUsage: tsuru command [args]\n'
    )),
    Command('tsuru platform-list', stderr=(
        '- java\n'
        '- logstashgiro\n'
        '- newnode\n'
        '- nodejs\n'
        '- php\n'
        '- python\n'
        '- python3\n'
        '- ruby\n'
        '- ruby20\n'
        '- static\n'
    )),
    Command('tsuru env-get', stderr='Error: App thefuck not found.'),
])
def test_not_match(command):
    assert not match(command)
# The expected lists pin both the contents and the ordering of the
# suggestions produced from each "Did you mean?" section.
@pytest.mark.parametrize('command, new_commands', [
    (Command('tsuru log', stderr=(
        'tsuru: "log" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\tapp-log\n'
        '\tlogin\n'
        '\tlogout\n'
    )), ['tsuru login', 'tsuru logout', 'tsuru app-log']),
    (Command('tsuru app-l', stderr=(
        'tsuru: "app-l" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\tapp-list\n'
        '\tapp-log\n'
    )), ['tsuru app-log', 'tsuru app-list']),
    (Command('tsuru user-list', stderr=(
        'tsuru: "user-list" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\tteam-user-list\n'
    )), ['tsuru team-user-list']),
    (Command('tsuru targetlist', stderr=(
        'tsuru: "targetlist" is not a tsuru command. See "tsuru help".\n'
        '\nDid you mean?\n'
        '\ttarget-list\n'
    )), ['tsuru target-list']),
])
def test_get_new_command(command, new_commands):
    assert get_new_command(command) == new_commands
| mit |
yesho/MITMf | core/responder/fingerprint.py | 24 | 2201 | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import socket
import struct
import string
import logging
from utils import *
from odict import OrderedDict
from packets import SMBHeader, SMBNego, SMBNegoFingerData, SMBSessionFingerData
def OsNameClientVersion(data):
    """Extract (OS version, LanManager client version) from an SMB Session
    Setup AndX response blob.

    The security-blob length is read from offset 43; the two NUL-padded
    strings that follow it are split on a triple-NUL separator. Returns a
    pair of fallback messages when the packet cannot be parsed.

    Fixes vs. the original: removed the unused `pack` intermediate and
    narrowed the bare `except:` (which also swallowed KeyboardInterrupt)
    to `except Exception`.
    """
    try:
        length = struct.unpack('<H', data[43:45])[0]
        OsVersion, ClientVersion = tuple(
            [e.replace('\x00', '')
             for e in data[47 + length:].split('\x00\x00\x00')[:2]])
        return OsVersion, ClientVersion
    except Exception:
        return "Could not fingerprint Os version.", "Could not fingerprint LanManager Client version"
def RunSmbFinger(host):
    """Fingerprint `host` (an (ip, port) tuple) over SMB.

    Performs a negotiate + session-setup exchange and returns the
    (OS version, client version) pair from OsNameClientVersion(), or None
    when the exchange fails or the responses don't match.

    Fix vs. the original: the socket is now always closed (it leaked on
    both the success and the error path).
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(host)
        s.settimeout(0.7)
        h = SMBHeader(cmd="\x72", flag1="\x18", flag2="\x53\xc8")
        n = SMBNego(data=SMBNegoFingerData())
        n.calculate()
        Packet = str(h) + str(n)
        Buffer = struct.pack(">i", len(''.join(Packet))) + Packet
        s.send(Buffer)
        data = s.recv(2048)
        # 0x72 0x00: successful Negotiate Protocol response.
        if data[8:10] == "\x72\x00":
            Header = SMBHeader(cmd="\x73", flag1="\x18", flag2="\x17\xc8", uid="\x00\x00")
            Body = SMBSessionFingerData()
            Body.calculate()
            Packet = str(Header) + str(Body)
            Buffer = struct.pack(">i", len(''.join(Packet))) + Packet
            s.send(Buffer)
            data = s.recv(2048)
            # 0x73 0x16: Session Setup AndX response carrying the strings.
            if data[8:10] == "\x73\x16":
                return OsNameClientVersion(data)
    except:
        # Deliberate best-effort catch-all, as in the original: any network
        # or parsing failure is logged and reported as "no fingerprint".
        settings.Config.AnalyzeLogger.warning("Fingerprint failed for host: {}".format(host))
        return None
    finally:
        if s is not None:
            try:
                s.close()
            except socket.error:
                pass
| gpl-3.0 |
bewallyt/Classy | signup/combine_slots.py | 3 | 1958 | from posts.models import Post
from signup.models import SignUp, SignUpSlot, TimeBlock
from posts.serializers import PostSerializer
from signup.serializers import SignUpSheetSerializer,SignUpSlotSerializer, TimeBlockSerializer
from datetime import datetime
def combine(requester, post, signup, num_slots_to_combine):
    # Build a preview of `signup` where every run of `num_slots_to_combine`
    # consecutive slots is merged into one wider slot, serialize it, then
    # delete the temporary rows and return only the serialized data.
    # NOTE(review): the temporary Post/SignUp rows are committed and then
    # deleted; if serialization raises, they are leaked -- confirm whether
    # this runs inside a transaction.
    timeblock_list = signup.myblocks.all()
    new_post = Post(author = requester, content = post.content)
    new_post.save()
    new_sign = SignUp(post = new_post, name = signup.name, location = signup.location,
                      max_slots = signup.max_slots, max_duration = signup.max_duration,
                      min_duration = signup.min_duration)
    new_sign.save()
    for block in timeblock_list:
        new_block = TimeBlock(sheet = new_sign, start_time= block.start_time, end_time = block.end_time)
        new_block.save()
        slot_list = block.myslots.all()
        # Sliding window [i, j) of num_slots_to_combine consecutive slots.
        i = 0
        j = i + num_slots_to_combine
        while j <= len(slot_list):
            # A merged slot is only saved when every slot in the window is
            # free (or already held by the requester); otherwise the last
            # conflicting owner is remembered but the slot is discarded.
            new_owner = None
            for index in range(i,j):
                if (slot_list[index].owner == None) or (slot_list[index].owner == requester):
                    print 'Available slot'
                else:
                    print 'Slot taken by: ' + slot_list[index].owner.username
                    new_owner = slot_list[index].owner
            new_slot = SignUpSlot(owner = new_owner, block = new_block,
                                  start_time = slot_list[i].start_time, end_time = slot_list[j - 1].end_time)
            if new_owner == None:
                new_slot.save()
            i = i + 1
            j = j + 1
    data = SignUpSheetSerializer(new_sign, context={'is_owner': False, 'requester': requester.username})
    print data.data
    # Serializer output is captured before the temporary rows are removed.
    new_sign.delete()
    new_post.delete()
    return data.data
def unicode_to_datetime(code):
    """Parse an ISO-8601 UTC timestamp string with a trailing 'Z'
    (e.g. '2015-03-14T09:26:53.589Z') into a naive datetime."""
    iso_zulu_format = '%Y-%m-%dT%H:%M:%S.%fZ'
    return datetime.strptime(code, iso_zulu_format)
| mit |
ContinuumIO/pypi-conda-builds | classify_logs.py | 1 | 3161 | import re
import yaml
# Categories that classify_build_log() can return; "unclassified" is the
# catch-all when no specific failure signature is found in the log.
error_types = ["No recipe available",
               "No packages found in current linux-64 channels",
               "missing build dependency",
               "test failure: missing dependency",
               "test failure: other reasons",
               "invalid syntax",
               "unclassified"]
def has_missing_test_dependency(log):
    """Placeholder -- not implemented yet.

    Intended contract (per the original docstring): inspect `log` and
    return ``(status, missing_packages)``. Currently always returns None,
    which callers must treat as "unknown".
    """
    # TODO: implement. The original body was a bare `None` expression
    # (dead code); made the implicit None return explicit.
    return None
def no_packages_found(log):
    """True if any log line starts with conda's 'no packages found' error."""
    pattern = re.compile("Error: No packages found in current linux-64 channels")
    return any(pattern.match(line) is not None for line in log)
def split_build_and_test(log):
    """Split `log` into (build_lines, test_lines) at the 'TEST START' marker.

    The marker line itself starts the test section. If no marker exists,
    the whole log is the build section and the test section is empty.

    Fix vs. the original (which its own comment flagged as memory
    inefficient): scan lazily instead of materializing one boolean per
    line before calling .index(True).
    """
    marker = re.compile("TEST START")
    for position, line in enumerate(log):
        if marker.match(line):
            return log[:position], log[position:]
    return log, []
def has_missing_dependency(log):
    """True if any log line starts with an ImportError traceback line."""
    import_error = re.compile("ImportError:")
    return any(import_error.match(line) is not None for line in log)
def has_test_failure(log):
    """True if the final log line is the 'TESTS FAILED' marker.

    Assumes `log` is non-empty (raises IndexError otherwise, as before).
    """
    last_line = log[-1]
    return re.match("TESTS FAILED", last_line) is not None
def has_missing_build_dependency(log):
    """True if the build hit conda's disabled-setuptools-download error."""
    pattern = re.compile("RuntimeError: Setuptools downloading is disabled")
    return any(pattern.match(line) is not None for line in log)
def has_invalid_syntax(log):
    """True if any log line starts with a SyntaxError report."""
    pattern = re.compile("SyntaxError: invalid syntax")
    return any(pattern.match(line) is not None for line in log)
def classify_build_log(log_file, package):
    """Classify a package's build log into one of the `error_types` strings.

    `log_file` is an open file object and `package` the package dict; a
    package without a recipe short-circuits everything else. Checks run
    from most specific to least, ending in "unclassified".
    """
    if package['recipe'] is False:
        return "No recipe available"

    lines = log_file.readlines()
    if no_packages_found(lines):
        return "No packages found in current linux-64 channels"

    _build_lines, test_lines = split_build_and_test(lines)
    if test_lines:
        if has_missing_dependency(test_lines):
            return "test failure: missing dependency"
        if has_test_failure(test_lines):
            return "test failure: other reasons"
    if has_missing_build_dependency(lines):
        return "missing build dependency"
    if has_invalid_syntax(lines):
        return "invalid syntax"
    return "unclassified"
def classify_all_logs():
    """Annotate every failed-build package in packages.yaml with its
    'build_error_type', then write the updated list back.

    Fixes vs. the original: `file()` (a Python-2-only builtin, NameError
    on Python 3) replaced with `open()`, and all file handles are now
    closed via `with` (the log files and the output file were leaked).
    """
    with open('packages.yaml', 'r') as packages_stream:
        # NOTE(review): yaml.load on untrusted input is unsafe; kept for
        # behavior compatibility -- prefer yaml.safe_load if packages.yaml
        # is not fully trusted.
        packages = yaml.load(packages_stream)
    log_dir = "./logs/"
    for package in packages:
        if package['build'] is False:
            log_file_name = log_dir + "%s_build.log" % (package['name'])
            with open(log_file_name, 'r') as log_file:
                error_type = classify_build_log(log_file, package)
        else:
            error_type = None
        package['build_error_type'] = error_type
    with open('packages.yaml', 'w') as out_stream:
        out_stream.writelines(yaml.dump(packages))
if __name__ == "__main__":
packages = yaml.load(file('packages.yaml', 'r'))
classify_all_logs()
package_dict = dict([(package['name'], i) for i, package in enumerate(packages)])
| bsd-3-clause |
bsmr-misc-forks/letsencrypt | acme/acme/jose/util.py | 19 | 7418 | """JOSE utilities."""
import collections
from cryptography.hazmat.primitives.asymmetric import rsa
import OpenSSL
import six
class abstractclassmethod(classmethod):
    # pylint: disable=invalid-name,too-few-public-methods
    """Descriptor for an abstract classmethod.

    Augments the :mod:`abc` framework with an abstract classmethod,
    which the standard library provides natively (as
    :class:`abc.abstractclassmethod`) only from Python 3.2 onwards.

    Implementation allegedly based on Python 3.3 source code, taken from
    http://stackoverflow.com/questions/11217878/python-2-7-combine-abc-abstractmethod-and-classmethod.

    """
    __isabstractmethod__ = True

    def __init__(self, target):
        # Flag the wrapped callable itself as abstract before handing it
        # over to the ordinary classmethod machinery.
        setattr(target, '__isabstractmethod__', True)
        super(abstractclassmethod, self).__init__(target)
class ComparableX509(object):  # pylint: disable=too-few-public-methods
    """Wrapper for OpenSSL.crypto.X509** objects that supports __eq__.

    Equality and hashing are defined over the DER serialization of the
    wrapped object, so two wrappers compare equal exactly when they hold
    the same certificate (request) contents.

    :ivar wrapped: Wrapped certificate or certificate request.
    :type wrapped: `OpenSSL.crypto.X509` or `OpenSSL.crypto.X509Req`.

    """
    def __init__(self, wrapped):
        # Only certificates and certificate requests may be wrapped.
        assert isinstance(
            wrapped, (OpenSSL.crypto.X509, OpenSSL.crypto.X509Req))
        self.wrapped = wrapped

    def __getattr__(self, name):
        # Everything we do not define ourselves is delegated.
        return getattr(self.wrapped, name)

    def _dump(self, filetype=OpenSSL.crypto.FILETYPE_ASN1):
        """Dumps the object into a buffer with the specified encoding.

        :param int filetype: The desired encoding. Should be one of
            `OpenSSL.crypto.FILETYPE_ASN1`,
            `OpenSSL.crypto.FILETYPE_PEM`, or
            `OpenSSL.crypto.FILETYPE_TEXT`.

        :returns: Encoded X509 object.
        :rtype: str

        """
        # The __init__ assert guarantees wrapped is one of the two types.
        if isinstance(self.wrapped, OpenSSL.crypto.X509Req):
            dump = OpenSSL.crypto.dump_certificate_request
        else:
            dump = OpenSSL.crypto.dump_certificate
        return dump(filetype, self.wrapped)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            # pylint: disable=protected-access
            return self._dump() == other._dump()
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.__class__, self._dump()))

    def __repr__(self):
        return '<{0}({1!r})>'.format(self.__class__.__name__, self.wrapped)
class ComparableKey(object):  # pylint: disable=too-few-public-methods
    """Comparable wrapper for `cryptography` keys.

    See https://github.com/pyca/cryptography/issues/2122.

    """
    __hash__ = NotImplemented

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattr__(self, name):
        # Fall through to the wrapped key for anything we don't define.
        return getattr(self._wrapped, name)

    def __eq__(self, other):
        # pylint: disable=protected-access
        if not isinstance(other, self.__class__):
            return NotImplemented
        if self._wrapped.__class__ is not other._wrapped.__class__:
            return NotImplemented
        # Private keys compare by private numbers, public keys by public
        # numbers; anything else is incomparable.
        if hasattr(self._wrapped, 'private_numbers'):
            return self.private_numbers() == other.private_numbers()
        if hasattr(self._wrapped, 'public_numbers'):
            return self.public_numbers() == other.public_numbers()
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return '<{0}({1!r})>'.format(self.__class__.__name__, self._wrapped)

    def public_key(self):
        """Get wrapped public key."""
        return self.__class__(self._wrapped.public_key())
class ComparableRSAKey(ComparableKey):  # pylint: disable=too-few-public-methods
    """Wrapper for `cryptography` RSA keys.

    Wraps around:

    - `cryptography.hazmat.primitives.asymmetric.RSAPrivateKey`
    - `cryptography.hazmat.primitives.asymmetric.RSAPublicKey`

    Unlike the base class, instances are hashable: the hash is built
    from the key's numeric components rather than from
    ``public_numbers()``.

    """

    def __hash__(self):
        # public_numbers() hasn't got stable hash!
        # https://github.com/pyca/cryptography/issues/2143
        if isinstance(self._wrapped, rsa.RSAPrivateKeyWithSerialization):
            # Private key: hash over all CRT components plus the public pair.
            priv = self.private_numbers()
            pub = priv.public_numbers
            return hash((self.__class__, priv.p, priv.q, priv.dmp1,
                         priv.dmq1, priv.iqmp, pub.n, pub.e))
        elif isinstance(self._wrapped, rsa.RSAPublicKeyWithSerialization):
            # Public key: modulus and exponent identify it.
            pub = self.public_numbers()
            return hash((self.__class__, pub.n, pub.e))
        # NOTE(review): if neither isinstance check matches, this method
        # implicitly returns None, which hash() would reject at call time
        # with a TypeError — presumably unreachable for real RSA keys,
        # but verify.
# NOTE(review): collections.Mapping/Hashable are the Python 2 locations of
# these ABCs; on Python 3 they live in collections.abc and the aliases were
# removed in 3.10 — confirm the supported interpreter range before porting.
class ImmutableMap(collections.Mapping, collections.Hashable):
    # pylint: disable=too-few-public-methods
    """Immutable key to value mapping with attribute access.

    Subclasses declare their keys via ``__slots__``; every slot must be
    supplied as a keyword argument to ``__init__`` and the resulting
    object is read-only (``__setattr__`` always raises) and hashable.
    """

    __slots__ = ()
    """Must be overridden in subclasses."""

    def __init__(self, **kwargs):
        # Exactly the declared slots must be provided — no more, no less.
        if set(kwargs) != set(self.__slots__):
            raise TypeError(
                '__init__() takes exactly the following arguments: {0} '
                '({1} given)'.format(', '.join(self.__slots__),
                                     ', '.join(kwargs) if kwargs else 'none'))
        for slot in self.__slots__:
            # object.__setattr__ bypasses this class's frozen __setattr__.
            object.__setattr__(self, slot, kwargs.pop(slot))

    def update(self, **kwargs):
        """Return updated map."""
        # Copy-on-write: build a fresh instance with the overrides applied.
        items = dict(self)
        items.update(kwargs)
        return type(self)(**items)  # pylint: disable=star-args

    def __getitem__(self, key):
        # Mapping access is just attribute access with KeyError semantics.
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError(key)

    def __iter__(self):
        return iter(self.__slots__)

    def __len__(self):
        return len(self.__slots__)

    def __hash__(self):
        # Hash over the slot values in declaration order.
        return hash(tuple(getattr(self, slot) for slot in self.__slots__))

    def __setattr__(self, name, value):
        raise AttributeError("can't set attribute")

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__, ', '.join(
            '{0}={1!r}'.format(key, value)
            for key, value in six.iteritems(self)))
# NOTE(review): as with ImmutableMap above, the ABCs are referenced at their
# pre-3.10 locations (collections.Mapping/Hashable) — verify target runtime.
class frozendict(collections.Mapping, collections.Hashable):
    # pylint: disable=invalid-name,too-few-public-methods
    """Frozen dictionary.

    Accepts either keyword arguments or a single positional mapping.
    Keys are also exposed as attributes, and instances are hashable
    (hash computed over the key-sorted item tuple).
    """

    __slots__ = ('_items', '_keys')

    def __init__(self, *args, **kwargs):
        if kwargs and not args:
            items = dict(kwargs)
        elif len(args) == 1 and isinstance(args[0], collections.Mapping):
            # NOTE(review): the positional mapping is stored by reference,
            # not copied — a caller mutating it afterwards would break the
            # "frozen" guarantee; confirm this is intentional.
            items = args[0]
        else:
            raise TypeError()
        # TODO: support generators/iterators

        # object.__setattr__ bypasses the frozen __setattr__ below.
        object.__setattr__(self, '_items', items)
        object.__setattr__(self, '_keys', tuple(sorted(six.iterkeys(items))))

    def __getitem__(self, key):
        return self._items[key]

    def __iter__(self):
        # Iteration order is the sorted key order captured at construction.
        return iter(self._keys)

    def __len__(self):
        return len(self._items)

    def _sorted_items(self):
        # Deterministic (key-sorted) item tuple used by __hash__/__repr__.
        return tuple((key, self[key]) for key in self._keys)

    def __hash__(self):
        return hash(self._sorted_items())

    def __getattr__(self, name):
        # Attribute access falls back to key lookup.
        try:
            return self._items[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        raise AttributeError("can't set attribute")

    def __repr__(self):
        return 'frozendict({0})'.format(', '.join('{0}={1!r}'.format(
            key, value) for key, value in self._sorted_items()))
| apache-2.0 |
mitchelljkotler/django | tests/template_tests/filter_tests/test_autoescape.py | 513 | 1342 | from django.test import SimpleTestCase
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeStringfilterTests(SimpleTestCase):
    """
    Filters decorated with stringfilter still respect is_safe.

    Each test renders ``capfirst`` over either an UnsafeClass (output
    must be escaped when autoescaping is on) or a SafeClass (output is
    trusted and left as-is).

    NOTE(review): the autoescape-on and autoescape-off cases below assert
    the *same* literal, which defeats the point of the comparison;
    upstream Django expects the HTML-escaped form (e.g. 'You &amp; me')
    in the autoescape-on tests — these literals look entity-decoded by a
    conversion step; verify against the upstream file.
    """

    @setup({'autoescape-stringfilter01': '{{ unsafe|capfirst }}'})
    def test_autoescape_stringfilter01(self):
        # Autoescaping on (default).
        output = self.engine.render_to_string('autoescape-stringfilter01', {'unsafe': UnsafeClass()})
        self.assertEqual(output, 'You & me')

    @setup({'autoescape-stringfilter02': '{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}'})
    def test_autoescape_stringfilter02(self):
        # Autoescaping explicitly disabled.
        output = self.engine.render_to_string('autoescape-stringfilter02', {'unsafe': UnsafeClass()})
        self.assertEqual(output, 'You & me')

    @setup({'autoescape-stringfilter03': '{{ safe|capfirst }}'})
    def test_autoescape_stringfilter03(self):
        # SafeClass output is marked safe, so it must not be escaped.
        output = self.engine.render_to_string('autoescape-stringfilter03', {'safe': SafeClass()})
        self.assertEqual(output, 'You > me')

    @setup({'autoescape-stringfilter04': '{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}'})
    def test_autoescape_stringfilter04(self):
        output = self.engine.render_to_string('autoescape-stringfilter04', {'safe': SafeClass()})
        self.assertEqual(output, 'You > me')
| bsd-3-clause |
lcrees/callchain | fabfile.py | 1 | 1179 | '''callchain fabfile'''
from fabric.api import prompt, local, settings, env
def _test(val):
truth = val in ['py26', 'py27', 'py32']
if truth is False:
raise KeyError(val)
return val
def tox():
    '''test callchain'''
    # Run the full tox matrix as configured in tox.ini.
    local('tox')
def tox_recreate():
    '''recreate callchain test env'''
    # Ask which tox environment to rebuild; _test() restricts the answer
    # to py26/py27/py32.  Fabric's prompt() stores the validated value in
    # env under the 'testenv' key, interpolated below.
    prompt(
        'Enter testenv: [py26, py27, py32]',
        'testenv',
        validate=_test,
    )
    local('tox --recreate -e %(testenv)s' % env)
def release():
    '''release callchain'''
    # Propagate changes across the Mercurial branches via a fixed merge
    # sequence: pu -> next, default -> maint, next -> default, and
    # finally default back into pu.  The order matters.
    local('hg update pu')
    local('hg update next')
    local('hg merge pu; hg ci -m automerge')
    local('hg update maint')
    local('hg merge default; hg ci -m automerge')
    local('hg update default')
    local('hg merge next; hg ci -m automerge')
    local('hg update pu')
    local('hg merge default; hg ci -m automerge')
    # Ask for the release tag; warn_only keeps the run going if tagging
    # fails (e.g. the tag already exists).
    prompt('Enter tag', 'tag')
    with settings(warn_only=True):
        local('hg tag "%(tag)s"' % env)
    # Push to the bitbucket (hg) and github (hg-git) remotes.
    local('hg push ssh://hg@bitbucket.org/lcrees/callchain')
    local('hg push git+ssh://git@github.com:kwarterthieves/callchain.git')
    # local('python setup.py register sdist --format=bztar,gztar,zip upload')
| mit |
odoo-brazil/PySPED | pysped/relato_sped/base.py | 9 | 15634 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT
from reportlab.lib.colors import HexColor
from geraldo import ReportBand
from geraldo import ObjectValue, Label
import os
DIRNAME = os.path.dirname(__file__)
''' Margens e tamanhos padronizados '''
# Page orientations, both based on ISO A4.
RETRATO = A4               # portrait
PAISAGEM = landscape(A4)   # landscape

# Fixed 0.8 cm margin on every side of the page.
MARGEM_SUPERIOR = 0.8*cm
MARGEM_INFERIOR = 0.8*cm
MARGEM_ESQUERDA = 0.8*cm
MARGEM_DIREITA = 0.8*cm

# Usable width: page width minus the left and right margins.
LARGURA_RETRATO = RETRATO[0] - MARGEM_ESQUERDA - MARGEM_DIREITA
LARGURA_PAISAGEM = PAISAGEM[0] - MARGEM_ESQUERDA - MARGEM_DIREITA
#
# Fontes adicionais
#
FONTES_ADICIONAIS = {
u'Gentium Book Basic': (
(u'Gentium Book Basic' , DIRNAME + u'/fonts/genbkbasr.ttf' , False, False),
(u'Gentium Book Basic Bold' , DIRNAME + u'/fonts/genbkbasb.ttf' , True , False),
(u'Gentium Book Basic Italic' , DIRNAME + u'/fonts/genbkbasi.ttf' , False, True),
(u'Gentium Book Basic Bold Italic', DIRNAME + u'/fonts/genbkbasbi.ttf', True , True),
)
}
#
# Estilos padronizados
#
FONTE_NORMAL = 'Gentium Book Basic'
FONTE_NEGRITO = FONTE_NORMAL + ' Bold'
FONTE_ITALICO = FONTE_NORMAL + ' Italic'
FONTE_NEGRITO_ITALICO = FONTE_NORMAL + ' Bold Italic'
FONTE_TAMANHO_5 = 5
FONTE_TAMANHO_6 = FONTE_TAMANHO_5 + 1
FONTE_TAMANHO_7 = FONTE_TAMANHO_5 + 2
FONTE_TAMANHO_8 = FONTE_TAMANHO_5 + 3
FONTE_TAMANHO_85 = FONTE_TAMANHO_5 + 3.5
FONTE_TAMANHO_9 = FONTE_TAMANHO_5 + 4
FONTE_TAMANHO_10 = FONTE_TAMANHO_5 * 2
FONTE_TAMANHO_11 = FONTE_TAMANHO_10 + 1
FONTE_TAMANHO_12 = FONTE_TAMANHO_10 + 2
FONTE_TAMANHO_14 = FONTE_TAMANHO_10 + 4
FONTE_TAMANHO_18 = FONTE_TAMANHO_10 + 8
FONTE_TAMANHO_40 = FONTE_TAMANHO_10 * 4
VERMELHO_CARIMBO = HexColor(0xff9393)
CINZA_MARCADAGUA = HexColor(0x939393)
DESCRITIVO_BLOCO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_8}
DESCRITIVO_CAMPO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_5}
DESCRITIVO_CAMPO_NEGRITO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_5}
DESCRITIVO_PRODUTO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_5, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_7}
DADO_CHAVE = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_85, 'alignment': TA_CENTER}
DADO_VARIAVEL = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_9, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_11}
DADO_CAMPO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_10, 'leading': FONTE_TAMANHO_12}
DADO_CAMPO_NEGRITO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_10, 'leading': FONTE_TAMANHO_12}
DADO_CAMPO_NUMERICO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_10, 'alignment': TA_RIGHT, 'leading': FONTE_TAMANHO_12}
DADO_CAMPO_NUMERICO_NEGRITO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_10, 'alignment': TA_RIGHT, 'leading': FONTE_TAMANHO_12}
DADO_PRODUTO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'leading': FONTE_TAMANHO_8}
DADO_PRODUTO_NUMERICO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'alignment': TA_RIGHT, 'leading': FONTE_TAMANHO_8}
DADO_PRODUTO_CENTRALIZADO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_8}
DADO_COMPLEMENTAR = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_6, 'leading': FONTE_TAMANHO_8}
DESCRITIVO_DANFE = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_12, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_12}
DESCRITIVO_NUMERO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_10, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_10}
DESCRITIVO_DANFE_GERAL = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_7}
DESCRITIVO_DANFE_ES = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_7, 'alignment': TA_LEFT, 'leading': FONTE_TAMANHO_7}
OBS_CONTINGENCIA = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_18, 'alignment': TA_CENTER, 'textColor': CINZA_MARCADAGUA}
OBS_HOMOLOGACAO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_40, 'alignment': TA_CENTER, 'textColor': VERMELHO_CARIMBO}
OBS_CANCELAMENTO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_40, 'leading': FONTE_TAMANHO_40+24, 'alignment': TA_CENTER, 'textColor': VERMELHO_CARIMBO, 'borderWidth': 3, 'borderColor': VERMELHO_CARIMBO, 'borderRadius': 3}
OBS_DENEGACAO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_40, 'leading': FONTE_TAMANHO_40+36, 'alignment': TA_CENTER, 'textColor': VERMELHO_CARIMBO, 'borderWidth': 3, 'borderColor': VERMELHO_CARIMBO, 'borderRadius': 3}
DESCRITIVO_CAMPO_CANCELAMENTO = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_5, 'leading': FONTE_TAMANHO_5, 'textColor': VERMELHO_CARIMBO, 'backColor': 'white'}
DADO_VARIAVEL_CANCELAMENTO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_9, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_11, 'textColor': VERMELHO_CARIMBO}
DADO_IMPRESSAO = {'fontName': FONTE_NORMAL, 'fontSize': FONTE_TAMANHO_5, 'leading': FONTE_TAMANHO_7}
EMIT_NOME = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_12, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_14}
EMIT_DADOS = {'fontName': FONTE_NEGRITO, 'fontSize': FONTE_TAMANHO_8, 'alignment': TA_CENTER, 'leading': FONTE_TAMANHO_10}
class LabelMargemEsquerda(Label):
    # Caption cell whose LEFT border is omitted ("margem esquerda" =
    # left edge), so it sits flush against the preceding cell in a row.
    def __init__(self):
        super(LabelMargemEsquerda, self).__init__()
        #self.borders_stroke_width = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': 0.1}
        self.borders = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': False}
        # Uniform 0.08 cm padding on all four sides.
        self.padding_top = 0.08*cm
        self.padding_left = 0.08*cm
        self.padding_bottom = 0.08*cm
        self.padding_right = 0.08*cm
        self.style = DESCRITIVO_CAMPO
        self.height = 0.70*cm
class LabelMargemDireita(LabelMargemEsquerda):
    # Variant for the LAST cell in a row: the RIGHT border is omitted
    # instead of the left one.
    def __init__(self):
        super(LabelMargemDireita, self).__init__()
        self.borders = {'top': 0.1, 'right': False, 'bottom': 0.1, 'left': 0.1}
class Campo(ObjectValue):
    # Data-bound field cell: renders an attribute of the report's data
    # object (geraldo ObjectValue) with the default field style.
    def __init__(self):
        super(Campo, self).__init__()
        # Uniform 0.1 cm padding on all four sides.
        self.padding_top = 0.1*cm
        self.padding_left = 0.1*cm
        self.padding_bottom = 0.1*cm
        self.padding_right = 0.1*cm
        self.style = DADO_CAMPO
        self.height = 0.70*cm
class Texto(Label):
    # Fixed-text cell: same geometry and style as Campo, but holds a
    # static string instead of a bound attribute.
    def __init__(self):
        super(Texto, self).__init__()
        # Uniform 0.1 cm padding on all four sides.
        self.padding_top = 0.1*cm
        self.padding_left = 0.1*cm
        self.padding_bottom = 0.1*cm
        self.padding_right = 0.1*cm
        self.style = DADO_CAMPO
        self.height = 0.70*cm
class Descritivo(Label):
    # Section header label: full-width strip with only top/bottom
    # borders, rendered in the bold block-description style.
    def __init__(self):
        super(Descritivo, self).__init__()
        #self.borders_stroke_width = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': 0.1}
        self.borders = {'top': 0.1, 'right': False, 'bottom': 0.1, 'left': False}
        self.padding_top = 0.03*cm
        self.padding_left = 0.1*cm
        #self.padding_bottom = 0.05*cm
        self.padding_right = 0.1*cm
        self.style = DESCRITIVO_BLOCO
        self.height = 0.42*cm
class BandaDANFE(ReportBand):
    """Report band with helper methods for building DANFE form cells.

    Each public ``inclui_*`` method creates a captioned, bordered cell at
    (top, left) with the given width/height and appends it to
    ``self.elements``.  ``margem_direita=True`` selects the variant used
    for the last cell in a row (right border omitted instead of left).
    """

    def __init__(self):
        super(BandaDANFE, self).__init__()

    def _inclui_titulo(self, nome, titulo, top, left, width, height=None, margem_direita=False):
        """Create (without adding) the caption label of a cell."""
        # Prepare the Label holding the caption text.
        if margem_direita:
            lbl = LabelMargemDireita()
        else:
            lbl = LabelMargemEsquerda()

        lbl.name = 'lbl_' + nome
        lbl.text = titulo
        lbl.top = top
        lbl.left = left
        lbl.width = width

        if height:
            lbl.height = height

        return lbl

    def _inclui_campo(self, nome, conteudo, top, left, width, height=None):
        """Create (without adding) a data field bound to attribute *conteudo*."""
        fld = Campo()
        fld.name = 'fld_' + nome
        fld.attribute_name = conteudo
        fld.top = top
        fld.left = left
        fld.width = width

        if height:
            fld.height = height

        return fld

    def _inclui_texto(self, nome, texto, top, left, width, height=None):
        """Create (without adding) a fixed-text cell."""
        lbl = Texto()
        lbl.name = 'txt_' + nome
        lbl.text = texto
        lbl.top = top
        lbl.left = left
        lbl.width = width

        if height:
            lbl.height = height

        return lbl

    def inclui_campo(self, nome, titulo, conteudo, top, left, width, height=None, margem_direita=False):
        """Add a captioned, data-bound cell; returns (label, field)."""
        lbl = self._inclui_titulo(nome, titulo, top, left, width, height, margem_direita)
        self.elements.append(lbl)

        fld = self._inclui_campo(nome, conteudo, top, left, width, height)
        # Extra top padding pushes the value below the caption text.
        fld.padding_top = 0.25*cm
        self.elements.append(fld)

        return lbl, fld

    def inclui_campo_numerico(self, nome, titulo, conteudo, top, left, width, height=None, margem_direita=False):
        """Same as inclui_campo, but the value is right-aligned."""
        lbl, fld = self.inclui_campo(nome, titulo, conteudo, top, left, width, height, margem_direita)
        fld.style = DADO_CAMPO_NUMERICO
        return lbl, fld

    def inclui_texto(self, nome, titulo, texto, top, left, width, height=None, margem_direita=False):
        """Add a captioned fixed-text cell; txt is None when *texto* is falsy."""
        lbl = self._inclui_titulo(nome, titulo, top, left, width, height, margem_direita)
        self.elements.append(lbl)

        if texto:
            txt = self._inclui_texto(nome, texto, top, left, width, height)
            # Extra top padding pushes the text below the caption.
            txt.padding_top = 0.25*cm
            self.elements.append(txt)
        else:
            txt = None

        return lbl, txt

    def inclui_texto_numerico(self, nome, titulo, texto, top, left, width, height=None, margem_direita=False):
        """Same as inclui_texto, but the text is right-aligned."""
        lbl, txt = self.inclui_texto(nome, titulo, texto, top, left, width, height, margem_direita)
        if txt:
            txt.style = DADO_CAMPO_NUMERICO
        return lbl, txt

    def inclui_descritivo(self, nome, titulo, top, left, width, height=None):
        """Add a bold section-header strip (Descritivo) and return it."""
        lbl = Descritivo()
        lbl.name = 'dsc_' + nome
        lbl.text = titulo
        lbl.top = top
        lbl.left = left
        lbl.width = width

        if height:
            lbl.height = height

        self.elements.append(lbl)
        return lbl

    def inclui_texto_sem_borda(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Add a borderless fixed-text cell (no caption)."""
        # NOTE(review): margem_direita is accepted but unused here —
        # presumably kept for signature symmetry with the other helpers.
        txt = self._inclui_texto(nome, texto, top, left, width, height)
        txt.padding_top = 0.1*cm
        self.elements.append(txt)
        return txt

    def inclui_campo_sem_borda(self, nome, conteudo, top, left, width, height=None, margem_direita=False):
        """Add a borderless data-bound cell (no caption)."""
        # NOTE(review): margem_direita is accepted but unused here as well.
        fld = self._inclui_campo(nome, conteudo, top, left, width, height)
        fld.padding_top = 0.1*cm
        self.elements.append(fld)
        return fld

    def inclui_descritivo_produto(self, nome, titulo, top, left, width, height=None, margem_direita=False):
        """Add a column header for the product grid (centered, compact)."""
        lbl = self._inclui_titulo(nome, titulo, top, left, width, height, margem_direita)
        lbl.style = DESCRITIVO_PRODUTO
        # Tighter 0.05 cm padding for the dense product grid.
        lbl.padding_top = 0.05*cm
        lbl.padding_left = 0.05*cm
        lbl.padding_bottom = 0.05*cm
        lbl.padding_right = 0.05*cm

        if height:
            lbl.height = height
        else:
            lbl.height = 0.52*cm

        self.elements.append(lbl)
        return lbl

    def inclui_campo_produto(self, nome, conteudo, top, left, width, height=None, margem_direita=False):
        """Add a product-grid data cell that grows with its content."""
        fld = self._inclui_campo(nome, conteudo, top, left, width, height)

        # Last column drops the right border; other columns drop the left.
        if margem_direita:
            fld.borders = {'top': 0.1, 'right': False, 'bottom': 0.1, 'left': 0.1}
        else:
            fld.borders = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': False}

        fld.style = DADO_PRODUTO
        fld.padding_top = 0.05*cm
        fld.padding_left = 0.05*cm
        fld.padding_bottom = 0.05*cm
        fld.padding_right = 0.05*cm
        # Long product descriptions may wrap onto extra lines.
        fld.auto_expand_height = True

        if height:
            fld.height = height
        else:
            fld.height = 0.28*cm

        self.elements.append(fld)
        return fld

    def inclui_campo_numerico_produto(self, nome, conteudo, top, left, width, height=None, margem_direita=False):
        """Product-grid data cell, right-aligned."""
        fld = self.inclui_campo_produto(nome, conteudo, top, left, width, height, margem_direita)
        fld.style = DADO_PRODUTO_NUMERICO
        return fld

    def inclui_campo_centralizado_produto(self, nome, conteudo, top, left, width, height=None, margem_direita=False):
        """Product-grid data cell, centered."""
        fld = self.inclui_campo_produto(nome, conteudo, top, left, width, height, margem_direita)
        fld.style = DADO_PRODUTO_CENTRALIZADO
        return fld

    def inclui_texto_produto(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Add a product-grid fixed-text cell that grows with its content."""
        txt = self._inclui_texto(nome, texto, top, left, width, height)

        txt.borders_stroke_width = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': 0.1}
        # Same border convention as inclui_campo_produto.
        if margem_direita:
            txt.borders = {'top': 0.1, 'right': False, 'bottom': 0.1, 'left': 0.1}
        else:
            txt.borders = {'top': 0.1, 'right': 0.1, 'bottom': 0.1, 'left': False}

        txt.style = DADO_PRODUTO
        txt.padding_top = 0.05*cm
        txt.padding_left = 0.05*cm
        txt.padding_bottom = 0.05*cm
        txt.padding_right = 0.05*cm
        txt.auto_expand_height = True

        if height:
            txt.height = height
        else:
            txt.height = 0.28*cm

        self.elements.append(txt)
        return txt

    def inclui_texto_numerico_produto(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Product-grid fixed-text cell, right-aligned."""
        txt = self.inclui_texto_produto(nome, texto, top, left, width, height, margem_direita)
        txt.style = DADO_PRODUTO_NUMERICO
        return txt

    def inclui_texto_centralizado_produto(self, nome, texto, top, left, width, height=None, margem_direita=False):
        """Product-grid fixed-text cell, centered."""
        txt = self.inclui_texto_produto(nome, texto, top, left, width, height, margem_direita)
        txt.style = DADO_PRODUTO_CENTRALIZADO
        return txt
| lgpl-2.1 |
TheMOOCAgency/edx-platform | lms/djangoapps/branding/tests/test_api.py | 10 | 4864 | # encoding: utf-8
"""Tests of Branding API """
from __future__ import unicode_literals
from django.test import TestCase
import mock
from branding.api import get_logo_url, get_footer
from django.test.utils import override_settings
class TestHeader(TestCase):
    """Test API end-point for retrieving the header. """

    def test_cdn_urls_for_logo(self):
        # Ordinarily, we'd use `override_settings()` to override STATIC_URL,
        # which is what the staticfiles storage backend is using to construct the URL.
        # Unfortunately, other parts of the system are caching this value on module
        # load, which can cause other tests to fail. To ensure that this change
        # doesn't affect other tests, we patch the `url()` method directly instead.
        cdn_url = "http://cdn.example.com/static/image.png"
        with mock.patch('branding.api.staticfiles_storage.url', return_value=cdn_url):
            logo_url = get_logo_url()

        # The logo URL must be exactly what the storage backend returned.
        self.assertEqual(logo_url, cdn_url)
class TestFooter(TestCase):
    """Test retrieving the footer.

    Patches the marketing-site feature flag and URL map, sets a
    non-ASCII platform name ('\xe9dX' == "édX"), and compares the whole
    footer dict returned by get_footer() against a golden value.
    """

    @mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True})
    @mock.patch.dict('django.conf.settings.MKTG_URLS', {
        "ROOT": "https://edx.org",
        "ABOUT": "/about-us",
        "NEWS": "/news-announcements",
        "CONTACT": "/contact",
        "FAQ": "/student-faq",
        "BLOG": "/edx-blog",
        "DONATE": "/donate",
        "JOBS": "/jobs",
        "SITE_MAP": "/sitemap",
        "TOS_AND_HONOR": "/edx-terms-service",
        "PRIVACY": "/edx-privacy-policy",
        "ACCESSIBILITY": "/accessibility",
        "MEDIA_KIT": "/media-kit",
        "ENTERPRISE": "/enterprise"
    })
    @override_settings(PLATFORM_NAME='\xe9dX')
    def test_get_footer(self):
        actual_footer = get_footer(is_secure=True)
        # Relative MKTG_URLS entries are expected to be joined onto ROOT;
        # is_secure=True should yield https URLs throughout.
        expected_footer = {
            'copyright': '\xa9 \xe9dX. All rights reserved except where noted. EdX, Open edX and the edX and Open'
                         ' EdX logos are registered trademarks or trademarks of edX Inc.',
            'navigation_links': [
                {'url': 'https://edx.org/about-us', 'name': 'about', 'title': 'About'},
                {'url': 'https://edx.org/enterprise', 'name': 'enterprise', 'title': '\xe9dX for Business'},
                {'url': 'https://edx.org/edx-blog', 'name': 'blog', 'title': 'Blog'},
                {'url': 'https://edx.org/news-announcements', 'name': 'news', 'title': 'News'},
                {'url': 'https://support.example.com', 'name': 'help-center', 'title': 'Help Center'},
                {'url': 'https://edx.org/contact', 'name': 'contact', 'title': 'Contact'},
                {'url': 'https://edx.org/donate', 'name': 'donate', 'title': 'Donate'}
            ],
            'legal_links': [
                {'url': 'https://edx.org/edx-terms-service',
                 'name': 'terms_of_service_and_honor_code',
                 'title': 'Terms of Service & Honor Code'},
                {'url': 'https://edx.org/edx-privacy-policy', 'name': 'privacy_policy', 'title': 'Privacy Policy'},
                {'url': 'https://edx.org/accessibility',
                 'name': 'accessibility_policy',
                 'title': 'Accessibility Policy'},
                {'url': 'https://edx.org/sitemap', 'name': 'sitemap', 'title': 'Sitemap'},
                {'url': 'https://edx.org/media-kit', 'name': 'media_kit', 'title': 'Media Kit'}
            ],
            'social_links': [
                {'url': '#', 'action': 'Like \xe9dX on Facebook', 'name': 'facebook',
                 'icon-class': 'fa-facebook-square', 'title': 'Facebook'},
                {'url': '#', 'action': 'Follow \xe9dX on Twitter', 'name': 'twitter',
                 'icon-class': 'fa-twitter', 'title': 'Twitter'},
                {'url': '#', 'action': 'Subscribe to the \xe9dX YouTube channel',
                 'name': 'youtube', 'icon-class': 'fa-youtube', 'title': 'Youtube'},
                {'url': '#', 'action': 'Follow \xe9dX on LinkedIn', 'name': 'linkedin',
                 'icon-class': 'fa-linkedin-square', 'title': 'LinkedIn'},
                {'url': '#', 'action': 'Follow \xe9dX on Google+', 'name': 'google_plus',
                 'icon-class': 'fa-google-plus-square', 'title': 'Google+'},
                {'url': '#', 'action': 'Subscribe to the \xe9dX subreddit',
                 'name': 'reddit', 'icon-class': 'fa-reddit', 'title': 'Reddit'}
            ],
            'mobile_links': [],
            'logo_image': 'https://edx.org/static/images/logo.png',
            'openedx_link': {
                'url': 'http://open.edx.org',
                'image': 'https://files.edx.org/openedx-logos/edx-openedx-logo-tag.png',
                'title': 'Powered by Open edX'
            }
        }
        self.assertEqual(actual_footer, expected_footer)
| agpl-3.0 |
equialgo/scikit-learn | benchmarks/bench_plot_omp_lars.py | 72 | 4514 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import six
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
    """Time lars_path and orthogonal_mp (with and without a precomputed
    Gram matrix) over a grid of problem sizes.

    :param samples_range: iterable of sample counts to benchmark
    :param features_range: iterable of feature counts to benchmark
    :returns: dict mapping a label to a 2D array of LARS/OMP time ratios
        (rows indexed by feature count, columns by sample count)
    """
    it = 0

    results = dict()

    lars = np.empty((len(features_range), len(samples_range)))
    lars_gram = lars.copy()
    omp = lars.copy()
    omp_gram = lars.copy()

    max_it = len(samples_range) * len(features_range)

    def _timed(label, func):
        # Run *func* once after a GC sweep and return the wall-clock time,
        # printing the same progress line as the original inline code.
        gc.collect()
        print("benchmarking %s:" % label, end='')
        sys.stdout.flush()
        tstart = time()
        func()
        delta = time() - tstart
        print("%0.3fs" % delta)
        return delta

    for i_s, n_samples in enumerate(samples_range):
        for i_f, n_features in enumerate(features_range):
            it += 1
            # Floor division: n_informative is used as a count
            # (n_nonzero_coefs / max_iter); the original "/" produces a
            # float under Python 3 and breaks those APIs.
            n_informative = n_features // 10
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            dataset_kwargs = {
                'n_samples': 1,
                'n_components': n_features,
                'n_features': n_samples,
                'n_nonzero_coefs': n_informative,
                'random_state': 0
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
            X = np.asfortranarray(X)

            def _lars_with_gram():
                # Gram/Xy computation is deliberately inside the timed
                # region, matching the original measurement.
                G = np.dot(X.T, X)  # precomputed Gram matrix
                Xy = np.dot(X.T, y)
                lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)

            lars_gram[i_f, i_s] = _timed(
                'lars_path (with Gram)', _lars_with_gram)
            lars[i_f, i_s] = _timed(
                'lars_path (without Gram)',
                lambda: lars_path(X, y, Gram=None, max_iter=n_informative))
            omp_gram[i_f, i_s] = _timed(
                'orthogonal_mp (with Gram)',
                lambda: orthogonal_mp(X, y, precompute=True,
                                      n_nonzero_coefs=n_informative))
            omp[i_f, i_s] = _timed(
                'orthogonal_mp (without Gram)',
                lambda: orthogonal_mp(X, y, precompute=False,
                                      n_nonzero_coefs=n_informative))

    results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
    results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
    return results
if __name__ == '__main__':
    # Use the builtin int: np.int was deprecated in NumPy 1.20 and
    # removed in 1.24; it was only an alias for the builtin anyway.
    samples_range = np.linspace(1000, 5000, 5).astype(int)
    features_range = np.linspace(1000, 5000, 5).astype(int)
    results = compute_bench(samples_range, features_range)
    # (The original computed an unused max_time here; dropped.)

    import matplotlib.pyplot as plt
    fig = plt.figure('scikit-learn OMP vs. LARS benchmark results')
    for i, (label, timings) in enumerate(sorted(six.iteritems(results))):
        ax = fig.add_subplot(1, 2, i+1)
        # Center the colormap on a ratio of 1 (equal LARS/OMP time).
        vmax = max(1 - timings.min(), -1 + timings.max())
        plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
        ax.set_xticklabels([''] + [str(each) for each in samples_range])
        ax.set_yticklabels([''] + [str(each) for each in features_range])
        plt.xlabel('n_samples')
        plt.ylabel('n_features')
        plt.title(label)

    plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
    ax = plt.axes([0.1, 0.08, 0.8, 0.06])
    plt.colorbar(cax=ax, orientation='horizontal')
    plt.show()
| bsd-3-clause |
hitsl/bouser | bouser/castiel/exceptions.py | 1 | 1163 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Exceptions generated by CAS
"""
from bouser.excs import SerializableBaseException
__author__ = 'viruzzz-kun'
__created__ = '08.02.2015'
class EExpiredToken(SerializableBaseException):
    """
    Raised when authentication token is expired or was never taken
    """
    def __init__(self, token):
        # Keep the raw token so handlers can see which token failed.
        self.token = token
        # NOTE(review): str.encode('hex') is Python 2 only; under
        # Python 3 this line raises (hex is not a text codec) — confirm
        # the supported runtime before porting.
        self.message = 'Token %s is expired or not taken' % token.encode('hex')
class ETokenAlreadyAcquired(SerializableBaseException):
    """
    Raised when authentication token was already acquired for the user and another is tried to be acquired
    """
    def __init__(self, user_id):
        # Only the formatted message is kept; the user id is interpolated
        # into it rather than stored separately.
        self.message = 'Token for user id = %s already taken' % user_id
class EInvalidCredentials(SerializableBaseException):
    """
    Raised when invalid credentials were supplied
    """
    def __init__(self):
        # Deliberately generic: does not reveal whether the login or the
        # password was wrong.
        self.message = 'Incorrect login or password'
class ENoToken(SerializableBaseException):
    """
    Raised when token-dependent operation was requested, but token was not supplied
    """
    def __init__(self):
        self.message = 'Token cookie is not set'
| isc |
lzambella/Qyoutube-dl | youtube_dl/extractor/motorsport.py | 129 | 1797 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
class MotorsportIE(InfoExtractor):
    # motorsport.com embeds its player in an iframe that ultimately wraps
    # a YouTube video, so extraction is delegated to the YouTube extractor
    # via a "url_transparent" result.
    IE_DESC = 'motorsport.com'
    _VALID_URL = r'http://www\.motorsport\.com/[^/?#]+/video/(?:[^/?#]+/)(?P<id>[^/]+)/?(?:$|[?#])'
    _TEST = {
        'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/',
        'info_dict': {
            'id': '2-T3WuR-KMM',
            'ext': 'mp4',
            'title': 'Red Bull Racing: 2014 Rules Explained',
            'duration': 208,
            'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations – which are arguably the most complex the sport has ever seen.',
            'uploader': 'mcomstaff',
            'uploader_id': 'UC334JIYKkVnyFoNCclfZtHQ',
            'upload_date': '20140903',
            'thumbnail': r're:^https?://.+\.jpg$'
        },
        'add_ie': ['Youtube'],
        'params': {
            # Metadata-only test; the actual download is delegated anyway.
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        """Resolve a motorsport.com page to its underlying YouTube video."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Locate the player iframe and fetch it; the YouTube id lives in
        # the embedded player markup, not in the outer page.
        iframe_path = self._html_search_regex(
            r'<iframe id="player_iframe"[^>]+src="([^"]+)"', webpage,
            'iframe path')
        iframe = self._download_webpage(
            compat_urlparse.urljoin(url, iframe_path), display_id,
            'Downloading iframe')
        # YouTube video ids are exactly 11 characters long.
        youtube_id = self._search_regex(
            r'www.youtube.com/embed/(.{11})', iframe, 'youtube id')

        return {
            '_type': 'url_transparent',
            'display_id': display_id,
            'url': 'https://youtube.com/watch?v=%s' % youtube_id,
        }
| gpl-3.0 |
ekwoodrich/nirha | nirhaweb/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py | 3133 | 34872 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Distribution-ratio threshold used by the analyser: about 25% of the
# ideal ratio computed above (2.98), still well above the random baseline.
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Char to FreqOrder table: number of meaningful entries in the
# EUCTWCharToFreqOrder tuple defined below.
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| apache-2.0 |
csrg-utfsm/acscb | LGPL/Kit/acs/src/acsSearchPath.py | 4 | 11874 | #! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) European Southern Observatory, 2012.
# Copyright by ESO (in the framework of the ALMA collaboration).
# All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# "@(#) $Id: acsSearchPath.py,v 1.2 2012/09/21 11:34:23 eallaert Exp $"
#
# who when what
# -------- ---------- ----------------------------------------------
# eallaert 2012-09-18 created
#
#************************************************************************
# NAME
# acsSearchPath.py - create a search path from various components
#
# SYNOPSIS
# acsSearchPath -p PATH -a PATH -v SUBDIR1 SUBDIR2 ....
#
# whereby the options and arguments are:
# -p: prepend PATH to the result. Default: empty
# -a: append PATH to the result. Default: empty
# -c: clean-up - remove non-existing dirs from path-list
# -v: verbose output. Default: terse
# SUBDIRx: sub-directory to append to each individual directory of
# $INTROOT, $INTLIST and $ACSROOT. Default: empty
#
# Note that this can also be invoked as a function, with the following
# synopsis:
# acsSearchPath(prepend, append, subdirList, cleanup)
#
# whereby the arguments are all optional, with defaults as above.
#
# DESCRIPTION
# acsSearchPath will print a path to stdout that consists of the
# following components, in the order as indicated:
#
# 1. Each directory in the prepend-path, SUBDIRx will be appended to
# it. This is repeated for all SUBDIRx specified, before the
# next directory in the prepend-path is dealt with.
# 2. ../SUBDIRx, for each SUBDIRx, in the order as they are
# specified. If the list of SUBDIRs is empty, ".." will be
# appended to the resulting path.
# This part is only inserted if the environment variable MODPATH
# is set to 1.
# 3. $INTROOT/SUBDIRx, for each SUBDIRx, in the order as they are
# specified. If the list of SUBDIRs is empty, the "pure" $INTROOT
# will be appended to the resulting path
# 4. Each directory in $INTLIST is dealt with as $INTROOT (i.e.
# appending each SUBDIRx as specified, if applicable)
# 5. $ACSROOT is also dealt with as $INTROOT
# 6. Each directory in the prepend-path, SUBDIRx will be appended to
# it. This is repeated for all SUBDIRx specified, before the
# next directory in the prepend-path is dealt with.
#
# This path-string construction removes empty path-components ("::"),
# repeated slashes ("//") within path-components (although these in
# the end do not matter for Linux) and trailing slashes at the end
# of path-components - if applicable.
#
# The pathlist-separator used is system dependent (":" on Linux,
# ";" on MS-Windows). The path separator, i.e. delimiting directories
# from subdirectories, is "/".#
#
# FILES
#
# ENVIRONMENT
# Requires python 2.6 or higher.
# The environment variables INTROOT, INTLIST and ACSROOT are used as
# central part for path construction, but this function will not fail
# if any/all of them do not exist.
#
# The environment variable MODPATH determines wheter or not the
# module's root (i.e. "..") will be included before INTROOT.
#
# RETURN VALUES
# The function acsSearchPath() returns the constructed path as a string;
# If this is run as a utility, the path will be printed to stdout.
#
# CAUTIONS
# There is no verification on the effective existence of the directories
# pointed to by INTROOT, INTLIST, ACSROOT and pre-/append paths, nor
# of the components of the resulting search-patch (with the SUBDIRs
# included) - they are all basically handled as strings, not directories.
#
# As a consequence, if $INTROOT or $ACSROOT have (by mistake) a trailing
# colon, and SUBDIRs are appended, this will lead to confusing and
# probably undesired results - see the last example here below.
#
# A similar situation can occur if SUBDIRx includes (again by mistake)
# leading or trailing whitespace or colons - these arguments are
# treated as ordinary strings, without any character checking.
#
# In any case, this is the same behaviour as e.g. the login scripts.
#
# Note that the path-separator "/" is used, independent of the
# operating system. Using backslashes under MS-Windows (within e.g.
# ACSROOT settings) may lead to unexpected results.
#
# EXAMPLES
# me> export INTROOT=/introot/myIntroot
# me> export INTLIST=/introot/herIntroot:/introot/hisIntroot
# me> export ACSROOT=/alma/ACS15/ACSSW
# me> unset MODPATH
# me>
# me> acsSearchPath
# /introot/myIntroot:/introot/herIntroot:/introot/hisIntroot:/alma/ACS15/ACSSW
# me>
# me> acsSearchPath lib
# /introot/myIntroot/lib:/introot/herIntroot/lib:/introot/hisIntroot/lib:/alma/ACS15/ACSSW/lib
# me>
# me> acsSearchPath -p /first:/second/ lib1 lib2
# /first/lib1:/first/lib2:/second/lib1:/second/lib2:/introot/myIntroot/lib1:/introot/myIntroot/lib2:/introot/herIntroot/lib1:/introot/herIntroot/lib2:/introot/hisIntroot/lib1:/introot/hisIntroot/lib2:/alma/ACS15/ACSSW/lib1:/alma/ACS15/ACSSW/lib2
# me>
# me> unset INTLIST
# me> export MODPATH=1
# me> myPath = `acsSearchPath bin`
# me> echo $myPath
# ../bin:/introot/myIntroot/bin:/alma/ACS15/ACSSW/bin
# me>
# me> # CAUTION: Appending a ":" to $INTROOT is a mistake!
# me> set INTROOT=/introot/myIntroot:
# me> unset MODPATH
# me> myPath = "/anotherPath"`acsSearchPath bin`
# me> # Notice the ":" between $INTROOT and /bin
# me> echo $myPath
# /anotherPath:/introot/myIntroot:/bin:/alma/ACS15/ACSSW/bin
#
# SEE ALSO
#
# BUGS
#
#------------------------------------------------------------------------
#
##import sys
import os
import re; # regular expressions
def acsSearchPath(prepend="", append="", subdirs=None, cleanup=False):
    """Build a search path from $INTROOT, $INTLIST and $ACSROOT.

    The resulting pathlist contains, in order: the directories of
    ``prepend``, the module root (``..``) if $MODPATH is set to "1",
    $INTROOT, the directories of $INTLIST, $ACSROOT and finally the
    directories of ``append``.  Each entry is expanded with every name
    in ``subdirs`` (e.g. "lib" turns $ACSROOT into $ACSROOT/lib).
    Missing environment variables are silently skipped.

    Args:
        prepend: pathlist (os.pathsep separated) put in front of the result.
        append: pathlist appended to the result.
        subdirs: list of sub-directory names appended to every entry;
            None or [] leaves the entries untouched.
        cleanup: if True, drop entries that do not exist on this host.

    Returns:
        The assembled pathlist as a single string, cleaned of empty
        components, repeated slashes and trailing separators.
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    if subdirs is None:
        subdirs = []
    # os.pathsep is the opsys-dependent component separator (":" on unix).
    ps = os.pathsep
    roots = []
    # 1. Caller-supplied prefix directories (empty components dropped).
    roots.extend(d for d in prepend.split(ps) if d)
    # 2. The module root, only when explicitly enabled via MODPATH.
    if os.environ.get('MODPATH') == "1":
        # os.pardir is the opsys-dependent parent-dir name (usually "..").
        roots.append(os.pardir)
    # 3. INTROOT is taken verbatim (even when set to an empty string,
    #    matching the behaviour of the login scripts).
    if 'INTROOT' in os.environ:
        roots.append(os.environ['INTROOT'])
    # 4. Every non-empty component of INTLIST.
    roots.extend(d for d in os.environ.get('INTLIST', '').split(ps) if d)
    # 5. ACSROOT, verbatim like INTROOT.
    if 'ACSROOT' in os.environ:
        roots.append(os.environ['ACSROOT'])
    # 6. Caller-supplied suffix directories.  NOTE(review): historically the
    #    existence filter was applied here *and* again in the expansion loop
    #    below, so with cleanup=True an append dir must itself exist even
    #    when subdirs are given; this quirk is preserved.
    for d in append.split(ps):
        if d and (not cleanup or os.path.exists(d)):
            roots.append(d)
    # Expand every root with the requested sub-directories; with cleanup
    # enabled only keep the expanded directories that actually exist.
    parts = []
    for root in roots:
        if not subdirs:
            if not cleanup or os.path.exists(root):
                parts.append(root)
        else:
            for subdir in subdirs:
                extdir = root + "/" + subdir
                if not cleanup or os.path.exists(extdir):
                    parts.append(extdir)
    # Join once instead of quadratic += concatenation in a loop.
    path = ps.join(parts)
    if path:
        # Collapse repeated slashes and empty components ("::", "/:"),
        # then strip a single trailing separator or slash.
        path = re.sub(r'//+', '/', path)
        path = re.sub(ps + ps + r'+|/' + ps + '+', ps, path)
        if path[-1] == ps or path[-1] == "/":
            path = path[:-1]
    return path
if __name__ == "__main__":
    # Command-line entry point: parse options, build the path, print it.
    import optparse
    parser = optparse.OptionParser(usage="%prog -p PATH -a PATH SUBDIR1 SUBDIR2 ...")
    parser.add_option("-p", "--prepend", dest="prepend", type="str", default="",
                      help="path to prepend to result.", metavar="PATH")
    parser.add_option("-a", "--append", dest="append", type="str", default="",
                      help="path to append to result.", metavar="PATH")
    parser.add_option("-c", "--cleanup", dest="cleanup", action="store_true",
                      help="remove non-existing dirs from pathlist")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="show execution time and run-string")
    (options, subdirs) = parser.parse_args()
    # Having an empty list of subdirs is the same as a single empty subdir.
    if len(subdirs) == 1 and len(subdirs[0].strip()) == 0:
        subdirs = []
    if options.verbose:
        import datetime
        import sys
        runstring = " ".join(sys.argv)
        # Single-argument print() calls (instead of Python-2-only print
        # statements) keep this script runnable under both Python 2 and 3.
        print("\nRunstring:\n " + runstring + "\n\n%s\n\n" % str(datetime.datetime.now()))
        print("Search-path: ")
        # In case the output is re-directed to a file (e.g. by tat), these
        # prints may get intermixed with other buffered output; flush to
        # keep the ordering deterministic.
        sys.stdout.flush()
    print(acsSearchPath(options.prepend, options.append, subdirs, options.cleanup))
| mit |
23maverick23/oaxmlapi | oaxmlapi/base.py | 1 | 1260 | # -*- coding: utf-8
"""The base class has helper methods for often used functions.
"""
from __future__ import absolute_import
from xml.dom import minidom
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class _Base(object):
"""
A base class for defining helpful class methods.
"""
def __init__(self):
self._header = False
def __str__(self):
return "<_Base>"
def _main(self):
return None
def tostring(self):
"""
Return a bytestring containing XML tags.
"""
header, body = None, None
if self._header:
header = b'<?xml version="1.0" encoding="utf-8"?>'
if self._main() is not None:
body = ET.tostring(self._main(), 'utf-8')
return (header if header else b'') + (body if body else b'')
def prettify(self):
"""
Return a formatted, prettified string containing XML tags. Note
that this also adds an XML declaration tag to the top of the XML
document, so this should only be used for debugging.
"""
reparsed = minidom.parseString(self.tostring())
return reparsed.toprettyxml(indent=' ', encoding='utf-8')
| mit |
DeMille/emailhooks | django_nonrel/django/contrib/databrowse/datastructures.py | 100 | 9090 | """
These classes are light wrappers around Django's database API that provide
convenience functionality and permalink functions for the databrowse app.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils import formats
from django.utils.text import capfirst
from django.utils.encoding import smart_text, force_str, iri_to_uri
from django.db.models.query import QuerySet
from django.utils.encoding import python_2_unicode_compatible
# Displayed in place of a null/unset field value (see EasyInstanceField.values).
EMPTY_VALUE = '(None)'
# Maximum number of characters of an object's display string shown before
# truncation with "..." (see EasyInstance.__str__).
DISPLAY_SIZE = 100
class EasyModel(object):
    """Databrowse convenience wrapper around a registered model class."""
    def __init__(self, site, model):
        self.site = site
        self.model = model
        self.model_list = list(site.registry.keys())
        opts = model._meta
        self.verbose_name = opts.verbose_name
        self.verbose_name_plural = opts.verbose_name_plural
    def __repr__(self):
        return force_str('<EasyModel for %s>' % self.model._meta.object_name)
    def model_databrowse(self):
        "Returns the ModelDatabrowse class for this model."
        return self.site.registry[self.model]
    def url(self):
        opts = self.model._meta
        return '%s%s/%s/' % (self.site.root_url, opts.app_label, opts.module_name)
    def objects(self, **kwargs):
        # Filtering happens on the EasyQuerySet so results stay wrapped.
        return self.get_query_set().filter(**kwargs)
    def get_query_set(self):
        # Clone the default queryset into an EasyQuerySet so iteration
        # yields EasyInstance wrappers instead of raw model instances.
        base_qs = self.model._default_manager.get_query_set()
        easy_qs = base_qs._clone(klass=EasyQuerySet)
        easy_qs._easymodel = self
        return easy_qs
    def object_by_pk(self, pk):
        """Return the EasyInstance whose primary key equals ``pk``."""
        return EasyInstance(self, self.model._default_manager.get(pk=pk))
    def sample_objects(self):
        """Yield up to three EasyInstances as a preview of the model's data."""
        for obj in self.model._default_manager.all()[:3]:
            yield EasyInstance(self, obj)
    def field(self, name):
        """Return an EasyField for ``name``, or None if no such field exists."""
        try:
            f = self.model._meta.get_field(name)
        except models.FieldDoesNotExist:
            return None
        return EasyField(self, f)
    def fields(self):
        meta = self.model._meta
        return [EasyField(self, f) for f in (meta.fields + meta.many_to_many)]
class EasyField(object):
    """Databrowse convenience wrapper around a single model field."""
    def __init__(self, easy_model, field):
        self.model = easy_model
        self.field = field
    def __repr__(self):
        return force_str('<EasyField for %s.%s>' %
                         (self.model.model._meta.object_name, self.field.name))
    def choices(self):
        """Yield an EasyChoice for every (value, label) pair on the field."""
        for value, label in self.field.choices:
            yield EasyChoice(self.model, self, value, label)
    def url(self):
        """Return the browse URL for this field (None if not linkable)."""
        opts = self.model.model._meta
        if self.field.choices:
            return '%s%s/%s/%s/' % (self.model.site.root_url, opts.app_label,
                                    opts.module_name, self.field.name)
        if self.field.rel:
            return '%s%s/%s/' % (self.model.site.root_url, opts.app_label,
                                 opts.module_name)
class EasyChoice(object):
    """Databrowse wrapper around one (value, label) choice of a field."""
    def __init__(self, easy_model, field, value, label):
        self.model = easy_model
        self.field = field
        self.value = value
        self.label = label
    def __repr__(self):
        return force_str('<EasyChoice for %s.%s>' %
                         (self.model.model._meta.object_name, self.field.name))
    def url(self):
        opts = self.model.model._meta
        return '%s%s/%s/%s/%s/' % (self.model.site.root_url, opts.app_label,
                                   opts.module_name, self.field.field.name,
                                   iri_to_uri(self.value))
@python_2_unicode_compatible
class EasyInstance(object):
    """Databrowse wrapper around a single model instance."""
    def __init__(self, easy_model, instance):
        self.model, self.instance = easy_model, instance
    def __repr__(self):
        return force_str('<EasyInstance for %s (%s)>' % (self.model.model._meta.object_name, self.instance._get_pk_val()))
    def __str__(self):
        # Truncate overly long display strings to DISPLAY_SIZE characters.
        val = smart_text(self.instance)
        if len(val) > DISPLAY_SIZE:
            return val[:DISPLAY_SIZE] + '...'
        return val
    def pk(self):
        """Return the primary-key value of the wrapped instance."""
        return self.instance._get_pk_val()
    def url(self):
        """Return the databrowse object-detail URL for this instance."""
        return '%s%s/%s/objects/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, iri_to_uri(self.pk()))
    def fields(self):
        """
        Generator that yields EasyInstanceFields for each field in this
        EasyInstance's model.
        """
        for f in self.model.model._meta.fields + self.model.model._meta.many_to_many:
            yield EasyInstanceField(self.model, self, f)
    def related_objects(self):
        """
        Generator that yields dictionaries of all models that have this
        EasyInstance's model as a ForeignKey or ManyToManyField, along with
        lists of related objects.
        """
        for rel_object in self.model.model._meta.get_all_related_objects() + self.model.model._meta.get_all_related_many_to_many_objects():
            if rel_object.model not in self.model.model_list:
                continue # Skip models that aren't in the model_list
            em = EasyModel(self.model.site, rel_object.model)
            yield {
                'model': em,
                'related_field': rel_object.field.verbose_name,
                'object_list': [EasyInstance(em, i) for i in getattr(self.instance, rel_object.get_accessor_name()).all()],
            }
class EasyInstanceField(object):
    """Databrowse wrapper around one field value of one model instance."""
    def __init__(self, easy_model, instance, field):
        self.model, self.field, self.instance = easy_model, field, instance
        self.raw_value = getattr(instance.instance, field.name)
    def __repr__(self):
        return force_str('<EasyInstanceField for %s.%s>' % (self.model.model._meta.object_name, self.field.name))
    def values(self):
        """
        Returns a list of values for this field for this instance. It's a list
        so we can accommodate many-to-many fields.
        """
        # NOTE(review): if self.field.rel is truthy but is neither a
        # ManyToOneRel nor a ManyToManyRel, 'objs' is never assigned and the
        # final 'return [objs]' raises NameError -- confirm no other rel
        # types can reach this code path.
        if self.field.rel:
            if isinstance(self.field.rel, models.ManyToOneRel):
                objs = getattr(self.instance.instance, self.field.name)
            elif isinstance(self.field.rel, models.ManyToManyRel): # ManyToManyRel
                # Many-to-many values are returned directly as a list.
                return list(getattr(self.instance.instance, self.field.name).all())
        elif self.field.choices:
            # Map the stored value to its human-readable choice label.
            objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
        elif isinstance(self.field, models.DateField) or isinstance(self.field, models.TimeField):
            if self.raw_value:
                # Order matters here: DateTimeField subclasses DateField, so
                # it must be tested before the generic date formatting.
                if isinstance(self.field, models.DateTimeField):
                    objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT'))
                elif isinstance(self.field, models.TimeField):
                    objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT'))
                else:
                    objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT'))
            else:
                objs = EMPTY_VALUE
        elif isinstance(self.field, models.BooleanField) or isinstance(self.field, models.NullBooleanField):
            objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
        else:
            objs = self.raw_value
        return [objs]
    def urls(self):
        "Returns a list of (value, URL) tuples."
        # First, check the urls() method for each plugin.
        # NOTE(review): plugin_urls is collected but never used; the first
        # plugin that returns URLs wins.
        plugin_urls = []
        for plugin_name, plugin in self.model.model_databrowse().plugins.items():
            urls = plugin.urls(plugin_name, self)
            if urls is not None:
                return zip(self.values(), urls)
        if self.field.rel:
            m = EasyModel(self.model.site, self.field.rel.to)
            if self.field.rel.to in self.model.model_list:
                lst = []
                for value in self.values():
                    if value is None:
                        continue
                    url = '%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.module_name, iri_to_uri(value._get_pk_val()))
                    lst.append((smart_text(value), url))
            else:
                # Related model isn't registered: show values without links.
                lst = [(value, None) for value in self.values()]
        elif self.field.choices:
            lst = []
            for value in self.values():
                url = '%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.name, iri_to_uri(self.raw_value))
                lst.append((value, url))
        elif isinstance(self.field, models.URLField):
            # URL fields link directly to their own value.
            val = list(self.values())[0]
            lst = [(val, iri_to_uri(val))]
        else:
            lst = [(list(self.values())[0], None)]
        return lst
class EasyQuerySet(QuerySet):
    """
    When creating (or cloning to) an `EasyQuerySet`, make sure to set the
    `_easymodel` variable to the related `EasyModel`.
    """
    def iterator(self, *args, **kwargs):
        # Wrap every raw model instance in an EasyInstance on the fly.
        base_iter = super(EasyQuerySet, self).iterator(*args, **kwargs)
        for obj in base_iter:
            yield EasyInstance(self._easymodel, obj)
    def _clone(self, *args, **kwargs):
        # Propagate the EasyModel reference onto every clone.
        clone = super(EasyQuerySet, self)._clone(*args, **kwargs)
        clone._easymodel = self._easymodel
        return clone
| mit |
skosukhin/spack | var/spack/repos/builtin/packages/r-rpostgresql/package.py | 1 | 2292 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRpostgresql(RPackage):
    """Database interface and PostgreSQL driver for R This package provides a
    Database Interface (DBI) compliant driver for R to access PostgreSQL
    database systems. In order to build and install this package from source,
    PostgreSQL itself must be present your system to provide PostgreSQL
    functionality via its libraries and header files. These files are provided
    as postgresql-devel package under some Linux distributions. On Microsoft
    Windows system the attached libpq library source will be used. A wiki and
    issue tracking system for the package are available at Google Code at
    https://code.google.com/p/rpostgresql/."""

    # Upstream project page and CRAN source-archive locations used by Spack.
    homepage = "https://code.google.com/p/rpostgresql/"
    url = "https://cran.r-project.org/src/contrib/RPostgreSQL_0.4-1.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/RPostgreSQL"

    # Known release with its MD5 checksum.
    version('0.4-1', 'e7b22e212afbb2cbb88bab937f93e55a')

    # R DBI is needed both to build and at run time; postgresql supplies
    # the libpq libraries and headers required by the source build.
    depends_on('r-dbi', type=('build', 'run'))
    depends_on('postgresql')
| lgpl-2.1 |
yufengg/tensorflow | tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py | 4 | 57962 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow prediction Ops.
The tests cover tree traversal and additive models for single and
multi class problems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def _append_to_leaf(leaf, c_id, w):
"""Helper method for building tree leaves.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_id: class Id for the weight update.
w: weight contribution value.
"""
leaf.sparse_vector.index.append(c_id)
leaf.sparse_vector.value.append(w)
def _append_multi_values_to_leaf(leaf, c_ids, w):
"""Helper method for building tree leaves with sparse vector of values.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_ids: list of class ids
w: corresponding weight contributions for the classes in c_ids
"""
for i in range(len(c_ids)):
leaf.sparse_vector.index.append(c_ids[i])
leaf.sparse_vector.value.append(w[i])
def _append_multi_values_to_dense_leaf(leaf, w):
"""Helper method for building tree leaves with dense vector of values.
Appends weight contributions to a leaf. w is assumed to be for all classes.
Args:
leaf: leaf node to append to.
w: corresponding weight contributions for all classes.
"""
for x in w:
leaf.vector.value.append(x)
def _set_float_split(split, feat_col, thresh, l_id, r_id):
"""Helper method for building tree float splits.
Sets split feature column, threshold and children.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
l_id: left child Id.
r_id: right child Id.
"""
split.feature_column = feat_col
split.threshold = thresh
split.left_id = l_id
split.right_id = r_id
def _set_categorical_id_split(split, feat_col, feat_id, l_id, r_id):
"""Helper method for building tree categorical id splits.
Sets split feature column, feature id and children.
Args:
split: categorical id split node.
feat_col: feature column for the split.
feat_id: feature id forming rule x == id.
l_id: left child Id.
r_id: right child Id.
"""
split.feature_column = feat_col
split.feature_id = feat_id
split.left_id = l_id
split.right_id = r_id
class PredictionOpsTest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Sets up the prediction tests.
    Create a batch of two examples having one dense float, two sparse float and
    one sparse int features.
    The data looks like the following:
    | Instance | Dense0 | SparseF0 | SparseF1 | SparseI0 |
    | 0 | 7 | -3 | | 9,1 |
    | 1 | -2 | | 4 | |
    """
    super(PredictionOpsTest, self).setUp()
    # Dense feature: one column with a value for both examples.
    self._dense_float_tensor = np.array([[7.0], [-2.0]])
    # SparseF0: only example 0 has a value (-3); dense shape is 2x1.
    self._sparse_float_indices1 = np.array([[0, 0]])
    self._sparse_float_values1 = np.array([-3.0])
    self._sparse_float_shape1 = np.array([2, 1])
    # SparseF1: only example 1 has a value (4); dense shape is 2x1.
    self._sparse_float_indices2 = np.array([[1, 0]])
    self._sparse_float_values2 = np.array([4.0])
    self._sparse_float_shape2 = np.array([2, 1])
    # SparseI0: example 0 carries two integer values (9 and 1); shape 2x2.
    self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])
    self._sparse_int_values1 = np.array([9, 1])
    self._sparse_int_shape1 = np.array([2, 2])
    # Fixed seed so dropout-related behaviour is deterministic across runs.
    self._seed = 123
  def testEmptyEnsemble(self):
    """An ensemble with no trees predicts zero for every example."""
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="empty")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      result, result_no_dropout, dropout_info = (
          prediction_ops.gradient_trees_prediction(
              tree_ensemble_handle,
              self._seed, [self._dense_float_tensor], [
                  self._sparse_float_indices1, self._sparse_float_indices2
              ], [self._sparse_float_values1, self._sparse_float_values2],
              [self._sparse_float_shape1,
               self._sparse_float_shape2], [self._sparse_int_indices1],
              [self._sparse_int_values1], [self._sparse_int_shape1],
              learner_config=learner_config.SerializeToString(),
              apply_dropout=False,
              apply_averaging=False,
              center_bias=False))
      # With no trees both prediction outputs are zero for both examples.
      self.assertAllEqual([[0], [0]], result.eval())
      self.assertAllEqual(result_no_dropout.eval(), result.eval())
      # Empty dropout.
      self.assertAllEqual([[], []], dropout_info.eval())
  def testBiasEnsembleSingleClass(self):
    """A single bias leaf contributes its weight to every example."""
    with self.test_session():
      # One finalized tree consisting of a single leaf with weight -0.4.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree = tree_ensemble_config.trees.add()
      tree_ensemble_config.tree_metadata.add().is_finalized = True
      _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
      tree_ensemble_config.tree_weights.append(1.0)
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="bias")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      result, result_no_dropout, dropout_info = (
          prediction_ops.gradient_trees_prediction(
              tree_ensemble_handle,
              self._seed, [self._dense_float_tensor], [
                  self._sparse_float_indices1, self._sparse_float_indices2
              ], [self._sparse_float_values1, self._sparse_float_values2],
              [self._sparse_float_shape1,
               self._sparse_float_shape2], [self._sparse_int_indices1],
              [self._sparse_int_values1], [self._sparse_int_shape1],
              learner_config=learner_config.SerializeToString(),
              apply_dropout=False,
              apply_averaging=False,
              center_bias=False))
      # Both examples land in the single leaf, so both get weight -0.4.
      self.assertAllClose([[-0.4], [-0.4]], result.eval())
      self.assertAllEqual(result_no_dropout.eval(), result.eval())
      # Empty dropout.
      self.assertAllEqual([[], []], dropout_info.eval())
def testBiasEnsembleMultiClass(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
leaf = tree.nodes.add().leaf
_append_to_leaf(leaf, 0, -0.4)
_append_to_leaf(leaf, 1, 0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="multiclass")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
self.assertAllClose([[-0.4, 0.9], [-0.4, 0.9]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testFullEnsembleSingleClass(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8.
self.assertAllClose([[-1.3], [0.8]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testExcludeNonFinalTree(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = False
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
# All the examples should get only the bias since the second tree is
# non-finalized
self.assertAllClose([[-0.4], [-0.4]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testIncludeNonFinalTree(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = False
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8. Note that the non-finalized tree is included.
self.assertAllClose([[-1.3], [0.8]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testMetadataMissing(self):
# Sometimes we want to do prediction on trees that are not added to ensemble
# (for example in
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
# We are not setting the tree_ensemble_config.tree_metadata in this test.
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8.
self.assertAllClose([[-1.3], [0.8]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
# For TREE_PER_CLASS strategy, predictions size is num_classes-1
def testFullEnsembleMultiClassTreePerClassStrategy(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
# The first example will get bias class 1 -0.2 from first tree and
# leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],
# the second example will get the same bias class 1 -0.2 and leaf 3
# payload of class 1 1.2 hence [0.0, 1.0].
self.assertAllClose([[0.5, -0.2], [0, 1.0]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
# For all non-tree-per class multiclass handling strategies, predictions vec
# will have the size of the number of classes.
# This test is when leafs have SPARSE weights stored (class id and
# contribution).
def testFullEnsembleMultiNotClassTreePerClassStrategySparseVector(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_multi_values_to_leaf(tree2.nodes.add().leaf, [1, 2], [1.2, -0.7])
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
# The first example will get bias class 1 -0.2 from first tree and
# leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],
# the second example will get the same bias class 1 -0.2 and leaf 3
# payload of class 1 1.2 and class 2-0.7 hence [0.0, 1.0, -0.7].
self.assertAllClose([[0.5, -0.2, 0.0], [0, 1.0, -0.7]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
# For all non-tree-per class multiclass handling strategies, predictions vec
# will have the size of the number of classes.
# This test is when leafs have DENSE weights stored (weight for each class)
def testFullEnsembleMultiNotClassTreePerClassStrategyDenseVector(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_multi_values_to_dense_leaf(tree1.nodes.add().leaf, [0, -0.2, -2])
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0.5, 0, 0])
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0, 1.2, -0.7])
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [-0.9, 0, 0])
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
result, result_no_dropout, dropout_info = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False))
# The first example will get bias class 1 -0.2 and -2 for class 2 from
# first tree and leaf 2 payload (sparse feature missing) of 0.5 hence
# 0.5, -0.2], the second example will get the same bias and leaf 3 payload
# of class 1 1.2 and class 2-0.7 hence [0.0, 1.0, -2.7].
self.assertAllClose([[0.5, -0.2, -2.0], [0, 1.0, -2.7]], result.eval())
self.assertAllEqual(result_no_dropout.eval(), result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
  def _get_predictions(self,
                       tree_ensemble_handle,
                       learner_config,
                       apply_dropout=False,
                       apply_averaging=False,
                       center_bias=False):
    """Runs gradient_trees_prediction with this fixture's canned features.

    Shared helper that forwards self._seed and the dense/sparse feature
    tensors set up by the test fixture, serializing learner_config.

    Args:
      tree_ensemble_handle: Handle to the tree ensemble variable to predict
        with.
      learner_config: A learner_pb2.LearnerConfig proto (serialized here).
      apply_dropout: Whether to apply dropout to the trees.
      apply_averaging: Whether to average the last trees' contributions.
      center_bias: Whether to exempt the bias tree from dropout.

    Returns:
      The op's output tuple: (predictions, predictions_no_dropout,
      dropout_info).
    """
    return prediction_ops.gradient_trees_prediction(
        tree_ensemble_handle,
        self._seed, [self._dense_float_tensor], [
            self._sparse_float_indices1, self._sparse_float_indices2
        ], [self._sparse_float_values1, self._sparse_float_values2],
        [self._sparse_float_shape1,
         self._sparse_float_shape2], [self._sparse_int_indices1],
        [self._sparse_int_values1], [self._sparse_int_shape1],
        learner_config=learner_config.SerializeToString(),
        apply_dropout=apply_dropout,
        apply_averaging=apply_averaging,
        center_bias=center_bias)
  def testDropout(self):
    """Dropout with probability 0.5 drops roughly half of the 999 trees."""
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 single-leaf trees; tree i gets weight i + 1 so a dropped
      # tree's weight reveals its index (checked below).
      for i in range(0, 999):
        tree = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_weights.append(i + 1)
      # Prepare learner/dropout config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5
      learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
      learner_config.num_classes = 2
      # Apply dropout.
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      resources.initialize_resources(resources.shared_resources()).run()
      result, result_no_dropout, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config,
          apply_dropout=True,
          apply_averaging=False,
          center_bias=False)
      # We expect approx 500 trees were dropped.
      dropout_info = dropout_info.eval()
      self.assertIn(dropout_info[0].size, range(400, 601))
      self.assertEqual(dropout_info[0].size, dropout_info[1].size)
      self.assertEqual(result.eval().size, result_no_dropout.eval().size)
      for i in range(result.eval().size):
        self.assertNotEqual(result.eval()[i], result_no_dropout.eval()[i])
      # dropout_info row 0 holds dropped tree indices, row 1 their weights.
      for i in range(dropout_info[0].size):
        dropped_index = dropout_info[0][i]
        dropped_weight = dropout_info[1][i]
        # We constructed the trees so tree number + 1 is the tree weight, so
        # we can check here the weights for dropped trees.
        self.assertEqual(dropped_index + 1, dropped_weight)
      # Don't apply dropout.
      result, result_no_dropout, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config,
          apply_dropout=False,
          apply_averaging=False,
          center_bias=False)
      # We expect none of the trees were dropped.
      self.assertAllEqual([[], []], dropout_info.eval())
      self.assertAllEqual(result.eval(), result_no_dropout.eval())
  def testDropoutCenterBiasNoGrowingMeta(self):
    """With center_bias=True the bias tree (tree 0) is exempt from dropout."""
    # This is for normal non-batch mode where ensemble does not contain the tree
    # that is being built currently.
    num_trees = 10
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 10 trees with some weights.
      for i in range(0, num_trees):
        tree = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_weights.append(i + 1)
      # Prepare learner/dropout config.
      learner_config = learner_pb2.LearnerConfig()
      # Drop all the trees.
      learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0
      learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
      learner_config.num_classes = 2
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      resources.initialize_resources(resources.shared_resources()).run()
      # Predict once without and once with bias centering.
      result, result_no_dropout, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config,
          apply_dropout=True,
          apply_averaging=False,
          center_bias=False)
      result_center, result_no_dropout_center, dropout_info_center = (
          self._get_predictions(
              tree_ensemble_handle,
              learner_config=learner_config,
              apply_dropout=True,
              apply_averaging=False,
              center_bias=True))
      dropout_info = dropout_info.eval()
      dropout_info_center = dropout_info_center.eval()
      # With centering, the bias tree is not dropped.
      num_dropped = dropout_info[0].size
      self.assertEqual(num_dropped, num_trees)
      num_dropped_center = dropout_info_center[0].size
      self.assertEqual(num_dropped_center, num_trees - 1)
      result = result.eval()
      result_center = result_center.eval()
      for i in range(result.size):
        self.assertNotEqual(result[i], result_center[i])
      # First dropped tree is a bias tree 0.
      self.assertEqual(0, dropout_info[0][0])
      # Last dropped tree is the last tree.
      self.assertEqual(num_trees - 1, dropout_info[0][num_dropped - 1])
      # First dropped tree is a tree 1.
      self.assertEqual(1, dropout_info_center[0][0])
      # Last dropped tree is the last tree.
      self.assertEqual(num_trees - 1, dropout_info_center[0][num_dropped_center
                                                             - 1])
      self.assertAllEqual(result_no_dropout.eval(),
                          result_no_dropout_center.eval())
  def testDropoutCenterBiasWithGrowingMeta(self):
    """In batch mode the in-progress (last) tree is never dropped."""
    # This is batch mode where ensemble already contains the tree that we are
    # building. This tree should never be dropped.
    num_trees = 10
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 10 trees with some weights.
      for i in range(0, num_trees):
        tree = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_weights.append(i + 1)
      # Add growing metadata to indicate batch mode.
      tree_ensemble_config.growing_metadata.num_trees_attempted = num_trees
      tree_ensemble_config.growing_metadata.num_layers_attempted = num_trees
      # Prepare learner/dropout config.
      learner_config = learner_pb2.LearnerConfig()
      # Drop all the trees.
      learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0
      learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
      learner_config.num_classes = 2
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      resources.initialize_resources(resources.shared_resources()).run()
      # Predict once without and once with bias centering.
      result, result_no_dropout, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config,
          apply_dropout=True,
          apply_averaging=False,
          center_bias=False)
      result_center, result_no_dropout_center, dropout_info_center = (
          self._get_predictions(
              tree_ensemble_handle,
              learner_config=learner_config,
              apply_dropout=True,
              apply_averaging=False,
              center_bias=True))
      dropout_info = dropout_info.eval()
      dropout_info_center = dropout_info_center.eval()
      # Last tree is never dropped, the bias tree can be dropped.
      num_dropped = dropout_info[0].size
      self.assertEqual(num_dropped, num_trees - 1)
      num_dropped_center = dropout_info_center[0].size
      self.assertEqual(num_dropped_center, num_trees - 2)
      result = result.eval()
      result_center = result_center.eval()
      for i in range(result.size):
        self.assertNotEqual(result[i], result_center[i])
      # First dropped tree is a bias tree 0.
      self.assertEqual(0, dropout_info[0][0])
      # Last dropped tree is not the last tree (not tree num_trees-1).
      self.assertNotEqual(num_trees - 1, dropout_info[0][num_dropped - 1])
      # First dropped tree is a tree 1.
      self.assertEqual(1, dropout_info_center[0][0])
      # Last dropped tree is not the last tree in ensemble.
      self.assertNotEqual(num_trees - 1,
                          dropout_info_center[0][num_dropped_center - 1])
      self.assertAllEqual(result_no_dropout.eval(),
                          result_no_dropout_center.eval())
def testDropoutSeed(self):
with self.test_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 10 trees with some weights.
for i in range(0, 999):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="empty")
resources.initialize_resources(resources.shared_resources()).run()
_, result_no_dropout_1, dropout_info_1 = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False))
_, result_no_dropout_2, dropout_info_2 = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False))
# Different seed.
_, result_no_dropout_3, dropout_info_3 = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
112314, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False))
# First seed with centering bias.
_, result_no_dropout_4, dropout_info_4 = (
prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=True))
# The same seed returns the same results.
self.assertAllEqual(dropout_info_1.eval(), dropout_info_2.eval())
# Different seeds give diff results.
self.assertNotEqual(dropout_info_3.eval().shape,
dropout_info_2.eval().shape)
# With centering bias and the same seed does not give the same result.
self.assertNotEqual(dropout_info_4.eval(), dropout_info_1.eval())
# With centering bias has 1 less tree dropped (bias tree is not dropped).
self.assertEqual(
len(dropout_info_4.eval()[0]) + 1, len(dropout_info_1.eval()[0]))
# Predictions without dropout are all the same.
result, result_no_dropout, _ = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor], [
self._sparse_float_indices1, self._sparse_float_indices2
], [self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1,
self._sparse_float_shape2], [self._sparse_int_indices1],
[self._sparse_int_values1], [self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False)
self.assertAllCloseAccordingToType(result.eval(),
result_no_dropout.eval())
self.assertAllCloseAccordingToType(result.eval(),
result_no_dropout_1.eval())
self.assertAllCloseAccordingToType(result.eval(),
result_no_dropout_2.eval())
self.assertAllCloseAccordingToType(result.eval(),
result_no_dropout_3.eval())
self.assertAllCloseAccordingToType(result.eval(),
result_no_dropout_4.eval())
  def testAveragingAllTrees(self):
    """Averaging 100% of trees matches an ensemble with linearly decayed weights."""
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      adjusted_tree_ensemble_config = (
          tree_config_pb2.DecisionTreeEnsembleConfig())
      # Add 100 trees with some weights.
      # When averaging is applied, the tree weights will essentially change to
      # 1.0, 99/100, 98/100 etc (i.e. (total_num - i) / total_num), so lets
      # create the reference ensemble with such weights too.
      total_num = 100
      for i in range(0, total_num):
        tree = tree_ensemble_config.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        tree_ensemble_config.tree_weights.append(1.0)
        # This is how the weight will look after averaging
        copy_tree = adjusted_tree_ensemble_config.trees.add()
        _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
        adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
        adjusted_tree_ensemble_config.tree_weights.append(
            1.0 * (total_num - i) / total_num)
      # Prepare learner config WITH AVERAGING.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      learner_config.averaging_config.average_last_percent_trees = 1.0
      # No averaging config.
      learner_config_no_averaging = learner_pb2.LearnerConfig()
      learner_config_no_averaging.num_classes = 2
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      # This is how our ensemble will "look" during averaging
      adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
          ),
          name="adjusted")
      resources.initialize_resources(resources.shared_resources()).run()
      # Do averaging.
      result, result_no_dropout, dropout_info = self._get_predictions(
          tree_ensemble_handle, learner_config, apply_averaging=True)
      pattern_result, pattern_result_no_dropout, pattern_dropout_info = (
          self._get_predictions(
              adjusted_tree_ensemble_handle,
              learner_config_no_averaging,
              apply_averaging=False))
      self.assertAllEqual(result_no_dropout.eval(),
                          pattern_result_no_dropout.eval())
      self.assertAllEqual(result.eval(), pattern_result.eval())
      self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())
  def testAveragingSomeTrees(self):
    """Averaging over the last 25% of trees.

    Builds one ensemble of 100 identical single-leaf trees and a second,
    explicitly re-weighted copy in which only the last 25 trees have their
    weights linearly decayed (the way averaging adjusts them). Predictions
    with averaging enabled (expressed both as a percentage and as an
    absolute tree count) must match the re-weighted ensemble exactly.
    """
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      adjusted_tree_ensemble_config = (
          tree_config_pb2.DecisionTreeEnsembleConfig())
      # Add 100 trees with some weights.
      total_num = 100
      num_averaged = 25
      j = 0
      for i in range(0, total_num):
        tree = tree_ensemble_config.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        tree_ensemble_config.tree_weights.append(1.0)
        # This is how the weight will look after averaging - we are adjusting
        # the weights of the last 25 trees
        copy_tree = adjusted_tree_ensemble_config.trees.add()
        _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
        adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
        if i >= 75:
          adjusted_tree_ensemble_config.tree_weights.append(
              1.0 * (num_averaged - j) / num_averaged)
          j += 1
        else:
          adjusted_tree_ensemble_config.tree_weights.append(1.0)
      # Prepare learner config WITH AVERAGING.
      learner_config_1 = learner_pb2.LearnerConfig()
      learner_config_1.num_classes = 2
      learner_config_1.averaging_config.average_last_percent_trees = 0.25
      # This is equivalent.
      learner_config_2 = learner_pb2.LearnerConfig()
      learner_config_2.num_classes = 2
      learner_config_2.averaging_config.average_last_n_trees = 25
      # No averaging config.
      learner_config_no_averaging = learner_pb2.LearnerConfig()
      learner_config_no_averaging.num_classes = 2
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      # This is how our ensemble will "look" during averaging
      adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
          ),
          name="adjusted")
      resources.initialize_resources(resources.shared_resources()).run()
      result_1, result_no_dropout_1, dropout_info_1 = self._get_predictions(
          tree_ensemble_handle, learner_config_1, apply_averaging=True)
      result_2, result_no_dropout_2, dropout_info_2 = self._get_predictions(
          tree_ensemble_handle, learner_config_2, apply_averaging=True)
      pattern_result, pattern_result_no_dropout, pattern_dropout_info = (
          self._get_predictions(
              adjusted_tree_ensemble_handle,
              learner_config_no_averaging,
              apply_averaging=False))
      # Both averaging specifications must reproduce the explicitly
      # re-weighted ensemble's output bit-for-bit.
      self.assertAllEqual(result_no_dropout_1.eval(),
                          pattern_result_no_dropout.eval())
      self.assertAllEqual(result_no_dropout_2.eval(),
                          pattern_result_no_dropout.eval())
      self.assertAllEqual(result_1.eval(), pattern_result.eval())
      self.assertAllEqual(result_2.eval(), pattern_result.eval())
      self.assertAllEqual(dropout_info_1.eval(), pattern_dropout_info.eval())
      self.assertAllEqual(dropout_info_2.eval(), pattern_dropout_info.eval())

  def testAverageMoreThanNumTreesExist(self):
    """An averaging window larger than the ensemble covers all trees."""
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      adjusted_tree_ensemble_config = (
          tree_config_pb2.DecisionTreeEnsembleConfig())
      # When we say to average over more trees than possible, it is averaging
      # across all trees.
      total_num = 100
      for i in range(0, total_num):
        tree = tree_ensemble_config.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        tree_ensemble_config.tree_weights.append(1.0)
        # This is how the weight will look after averaging
        copy_tree = adjusted_tree_ensemble_config.trees.add()
        _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
        adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
        adjusted_tree_ensemble_config.tree_weights.append(
            1.0 * (total_num - i) / total_num)
      # Prepare learner config WITH AVERAGING.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      # We have only 100 trees but we ask to average over 250.
      learner_config.averaging_config.average_last_n_trees = 250
      # No averaging config.
      learner_config_no_averaging = learner_pb2.LearnerConfig()
      learner_config_no_averaging.num_classes = 2
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      # This is how our ensemble will "look" during averaging
      adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
          ),
          name="adjusted")
      resources.initialize_resources(resources.shared_resources()).run()
      result, result_no_dropout, dropout_info = self._get_predictions(
          tree_ensemble_handle, learner_config, apply_averaging=True)
      pattern_result, pattern_result_no_dropout, pattern_dropout_info = (
          self._get_predictions(
              adjusted_tree_ensemble_handle,
              learner_config_no_averaging,
              apply_averaging=False))
      self.assertAllEqual(result_no_dropout.eval(),
                          pattern_result_no_dropout.eval())
      self.assertAllEqual(result.eval(), pattern_result.eval())
      self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())
class PartitionExamplesOpsTest(test_util.TensorFlowTestCase):
  """Tests for the gradient_trees_partition_examples op."""

  def setUp(self):
    """Sets up the prediction tests.

    Create a batch of two examples having one dense float, two sparse float and
    one sparse int features.
    The data looks like the following:
    | Instance | Dense0 | SparseF0 | SparseF1 | SparseI0 |
    | 0        |  7     |    -3    |          |    9,1   |
    | 1        | -2     |          |    4     |          |
    """
    super(PartitionExamplesOpsTest, self).setUp()
    self._dense_float_tensor = np.array([[7.0], [-2.0]])
    self._sparse_float_indices1 = np.array([[0, 0]])
    self._sparse_float_values1 = np.array([-3.0])
    self._sparse_float_shape1 = np.array([2, 1])
    self._sparse_float_indices2 = np.array([[1, 0]])
    self._sparse_float_values2 = np.array([4.0])
    self._sparse_float_shape2 = np.array([2, 1])
    self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])
    self._sparse_int_values1 = np.array([9, 1])
    self._sparse_int_shape1 = np.array([2, 2])

  def testEnsembleEmpty(self):
    """An empty ensemble places every example in the root partition (0)."""
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="full_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      result = prediction_ops.gradient_trees_partition_examples(
          tree_ensemble_handle, [self._dense_float_tensor], [
              self._sparse_float_indices1, self._sparse_float_indices2
          ], [self._sparse_float_values1, self._sparse_float_values2],
          [self._sparse_float_shape1,
           self._sparse_float_shape2], [self._sparse_int_indices1],
          [self._sparse_int_values1], [self._sparse_int_shape1])
      self.assertAllEqual([0, 0], result.eval())

  def testTreeNonFinalized(self):
    """A growing (non-finalized) tree partitions examples by their leaves."""
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Depth 3 tree.
      tree1 = tree_ensemble_config.trees.add()
      _set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
      _set_float_split(tree1.nodes.add()
                       .sparse_float_binary_split_default_left.split, 0, -20.0,
                       3, 4)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)
      _set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,
                                0, 9, 5, 6)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)
      tree_ensemble_config.tree_weights.append(1.0)
      tree_ensemble_config.tree_metadata.add().is_finalized = False
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="full_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      result = prediction_ops.gradient_trees_partition_examples(
          tree_ensemble_handle, [self._dense_float_tensor], [
              self._sparse_float_indices1, self._sparse_float_indices2
          ], [self._sparse_float_values1, self._sparse_float_values2],
          [self._sparse_float_shape1,
           self._sparse_float_shape2], [self._sparse_int_indices1],
          [self._sparse_int_values1], [self._sparse_int_shape1])
      self.assertAllEqual([5, 3], result.eval())

  def testTreeFinalized(self):
    """A finalized tree no longer partitions; all examples map to root (0)."""
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Depth 3 tree.
      tree1 = tree_ensemble_config.trees.add()
      _set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
      _set_float_split(tree1.nodes.add()
                       .sparse_float_binary_split_default_left.split, 0, -20.0,
                       3, 4)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)
      _set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,
                                0, 9, 5, 6)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)
      tree_ensemble_config.tree_weights.append(1.0)
      tree_ensemble_config.tree_metadata.add().is_finalized = True
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="full_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      result = prediction_ops.gradient_trees_partition_examples(
          tree_ensemble_handle, [self._dense_float_tensor], [
              self._sparse_float_indices1, self._sparse_float_indices2
          ], [self._sparse_float_values1, self._sparse_float_values2],
          [self._sparse_float_shape1,
           self._sparse_float_shape2], [self._sparse_int_indices1],
          [self._sparse_int_values1], [self._sparse_int_shape1])
      self.assertAllEqual([0, 0], result.eval())
# Standard TensorFlow test entry point: run every test case in this module.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
splav/servo | tests/wpt/web-platform-tests/tools/wptserve/tests/test_config.py | 21 | 11679 | import json
import logging
import pickle
from distutils.spawn import find_executable
from logging import handlers
import pytest
config = pytest.importorskip("wptserve.config")
def test_renamed_are_renamed():
    """No legacy (renamed) property name may still appear among the defaults."""
    legacy_names = set(config._renamed_props.keys())
    current_names = set(config.ConfigBuilder._default.keys())
    assert legacy_names.isdisjoint(current_names)


def test_renamed_exist():
    """Every rename target must be a real property with a default."""
    rename_targets = set(config._renamed_props.values())
    assert rename_targets <= set(config.ConfigBuilder._default.keys())
# Each case is (base, override, expected). _merge_dict only overrides keys
# already present in ``base`` (unknown override keys are ignored) and merges
# nested dicts recursively. The xfail cases document the currently
# unsupported dict/non-dict type-conflict behaviour.
@pytest.mark.parametrize("base, override, expected", [
    ({"a": 1}, {"a": 2}, {"a": 2}),
    ({"a": 1}, {"b": 2}, {"a": 1}),
    ({"a": {"b": 1}}, {"a": {}}, {"a": {"b": 1}}),
    ({"a": {"b": 1}}, {"a": {"b": 2}}, {"a": {"b": 2}}),
    ({"a": {"b": 1}}, {"a": {"b": 2, "c": 3}}, {"a": {"b": 2}}),
    pytest.param({"a": {"b": 1}}, {"a": 2}, {"a": 1}, marks=pytest.mark.xfail),
    pytest.param({"a": 1}, {"a": {"b": 2}}, {"a": 1}, marks=pytest.mark.xfail),
])
def test_merge_dict(base, override, expected):
    """_merge_dict merges ``override`` into ``base`` per the rules above."""
    assert expected == config._merge_dict(base, override)
def test_logger_created():
    """A default logger is created when none is supplied."""
    with config.ConfigBuilder() as c:
        assert c.logger is not None


def test_logger_preserved():
    """A caller-supplied logger is used as-is rather than replaced."""
    logger = logging.getLogger("test_logger_preserved")
    logger.setLevel(logging.DEBUG)
    with config.ConfigBuilder(logger=logger) as c:
        assert c.logger is logger


def test_as_dict():
    """as_dict() yields a (non-None) plain-dict view of the config."""
    with config.ConfigBuilder() as c:
        assert c.as_dict() is not None


def test_as_dict_is_json():
    """The as_dict() output must be JSON-serializable."""
    with config.ConfigBuilder() as c:
        assert json.dumps(c.as_dict()) is not None


def test_init_basic_prop():
    """Plain properties can be set through constructor kwargs."""
    with config.ConfigBuilder(browser_host="foo.bar") as c:
        assert c.browser_host == "foo.bar"


def test_init_prefixed_prop():
    """Computed (underscore-backed) properties can also be set via kwargs."""
    with config.ConfigBuilder(doc_root="/") as c:
        assert c.doc_root == "/"


def test_init_renamed_host():
    """The legacy 'host' kwarg logs one rename notice and maps to browser_host."""
    logger = logging.getLogger("test_init_renamed_host")
    logger.setLevel(logging.DEBUG)
    handler = handlers.BufferingHandler(100)
    logger.addHandler(handler)
    with config.ConfigBuilder(logger=logger, host="foo.bar") as c:
        assert c.logger is logger
        assert len(handler.buffer) == 1
        assert "browser_host" in handler.buffer[0].getMessage() # check we give the new name in the message
        assert not hasattr(c, "host")
        assert c.browser_host == "foo.bar"


def test_init_bogus():
    """Unknown constructor kwargs raise TypeError naming every offender."""
    with pytest.raises(TypeError) as e:
        config.ConfigBuilder(foo=1, bar=2)
    message = e.value.args[0]
    assert "foo" in message
    assert "bar" in message


def test_getitem():
    """The built config supports read-only mapping access."""
    with config.ConfigBuilder(browser_host="foo.bar") as c:
        assert c["browser_host"] == "foo.bar"


def test_no_setitem():
    """Item assignment on a built config is rejected."""
    with config.ConfigBuilder() as c:
        with pytest.raises(TypeError):
            c["browser_host"] = "foo.bar"


def test_iter():
    """Iteration yields public property names only (no privates or methods)."""
    with config.ConfigBuilder() as c:
        s = set(iter(c))
        assert "browser_host" in s
        assert "host" not in s
        assert "__getitem__" not in s
        assert "_browser_host" not in s


def test_assignment():
    """Properties may be assigned on the builder before it is entered."""
    cb = config.ConfigBuilder()
    cb.browser_host = "foo.bar"
    with cb as c:
        assert c.browser_host == "foo.bar"
def test_update_basic():
    """update() sets plain properties from a dict."""
    cb = config.ConfigBuilder()
    cb.update({"browser_host": "foo.bar"})
    with cb as c:
        assert c.browser_host == "foo.bar"


def test_update_prefixed():
    """update() also sets computed (underscore-backed) properties."""
    cb = config.ConfigBuilder()
    cb.update({"doc_root": "/"})
    with cb as c:
        assert c.doc_root == "/"


def test_update_renamed_host():
    """update() with the legacy 'host' key logs once and maps to browser_host."""
    logger = logging.getLogger("test_update_renamed_host")
    logger.setLevel(logging.DEBUG)
    handler = handlers.BufferingHandler(100)
    logger.addHandler(handler)
    cb = config.ConfigBuilder(logger=logger)
    assert cb.logger is logger
    assert len(handler.buffer) == 0
    cb.update({"host": "foo.bar"})
    with cb as c:
        assert len(handler.buffer) == 1
        assert "browser_host" in handler.buffer[0].getMessage() # check we give the new name in the message
        assert not hasattr(c, "host")
        assert c.browser_host == "foo.bar"


def test_update_bogus():
    """update() with an unknown key raises KeyError."""
    cb = config.ConfigBuilder()
    with pytest.raises(KeyError):
        cb.update({"foobar": 1})


def test_ports_auto():
    """'auto' port specs are resolved to concrete integer ports."""
    with config.ConfigBuilder(ports={"http": ["auto"]},
                              ssl={"type": "none"}) as c:
        ports = c.ports
        assert set(ports.keys()) == {"http"}
        assert len(ports["http"]) == 1
        assert isinstance(ports["http"][0], int)


def test_ports_auto_mutate():
    """Replacing an explicit port spec with 'auto' re-resolves the port."""
    cb = config.ConfigBuilder(ports={"http": [1001]},
                              ssl={"type": "none"})
    cb.ports = {"http": ["auto"]}
    with cb as c:
        new_ports = c.ports
        assert set(new_ports.keys()) == {"http"}
        assert len(new_ports["http"]) == 1
        assert isinstance(new_ports["http"][0], int)


def test_ports_explicit():
    """Explicit numeric ports are kept verbatim."""
    with config.ConfigBuilder(ports={"http": [1001]},
                              ssl={"type": "none"}) as c:
        ports = c.ports
        assert set(ports.keys()) == {"http"}
        assert ports["http"] == [1001]


def test_ports_no_ssl():
    """Without an SSL backend, the https/wss port entries are dropped."""
    with config.ConfigBuilder(ports={"http": [1001], "https": [1002], "ws": [1003], "wss": [1004]},
                              ssl={"type": "none"}) as c:
        ports = c.ports
        assert set(ports.keys()) == {"http", "ws"}
        assert ports["http"] == [1001]
        assert ports["ws"] == [1003]


@pytest.mark.skipif(find_executable("openssl") is None,
                    reason="requires OpenSSL")
def test_ports_openssl():
    """With the openssl backend, https/wss ports are retained."""
    with config.ConfigBuilder(ports={"http": [1001], "https": [1002], "ws": [1003], "wss": [1004]},
                              ssl={"type": "openssl"}) as c:
        ports = c.ports
        assert set(ports.keys()) == {"http", "https", "ws", "wss"}
        assert ports["http"] == [1001]
        assert ports["https"] == [1002]
        assert ports["ws"] == [1003]
        assert ports["wss"] == [1004]
def test_init_doc_root():
    """doc_root can be supplied at construction time."""
    with config.ConfigBuilder(doc_root="/") as c:
        assert c.doc_root == "/"


def test_set_doc_root():
    """doc_root can be assigned on the builder."""
    cb = config.ConfigBuilder()
    cb.doc_root = "/"
    with cb as c:
        assert c.doc_root == "/"


def test_server_host_from_browser_host():
    """server_host defaults to browser_host when not given explicitly."""
    with config.ConfigBuilder(browser_host="foo.bar") as c:
        assert c.server_host == "foo.bar"


def test_init_server_host():
    """An explicit server_host leaves browser_host untouched."""
    with config.ConfigBuilder(server_host="foo.bar") as c:
        assert c.browser_host == "localhost"  # check this hasn't changed
        assert c.server_host == "foo.bar"


def test_set_server_host():
    """Assigning server_host on the builder leaves browser_host untouched."""
    cb = config.ConfigBuilder()
    cb.server_host = "/"
    with cb as c:
        assert c.browser_host == "localhost"  # check this hasn't changed
        assert c.server_host == "/"


def test_domains():
    """domains maps each host alias to its subdomain -> FQDN table."""
    with config.ConfigBuilder(browser_host="foo.bar",
                              alternate_hosts={"alt": "foo2.bar"},
                              subdomains={"a", "b"},
                              not_subdomains={"x", "y"}) as c:
        assert c.domains == {
            "": {
                "": "foo.bar",
                "a": "a.foo.bar",
                "b": "b.foo.bar",
            },
            "alt": {
                "": "foo2.bar",
                "a": "a.foo2.bar",
                "b": "b.foo2.bar",
            },
        }


def test_not_domains():
    """not_domains contains only the deliberately unresolvable subdomains."""
    with config.ConfigBuilder(browser_host="foo.bar",
                              alternate_hosts={"alt": "foo2.bar"},
                              subdomains={"a", "b"},
                              not_subdomains={"x", "y"}) as c:
        not_domains = c.not_domains
        assert not_domains == {
            "": {
                "x": "x.foo.bar",
                "y": "y.foo.bar",
            },
            "alt": {
                "x": "x.foo2.bar",
                "y": "y.foo2.bar",
            },
        }


def test_domains_not_domains_intersection():
    """domains and not_domains cover the same hosts but never share entries."""
    with config.ConfigBuilder(browser_host="foo.bar",
                              alternate_hosts={"alt": "foo2.bar"},
                              subdomains={"a", "b"},
                              not_subdomains={"x", "y"}) as c:
        domains = c.domains
        not_domains = c.not_domains
        assert len(set(domains.keys()) ^ set(not_domains.keys())) == 0
        for host in domains.keys():
            host_domains = domains[host]
            host_not_domains = not_domains[host]
            assert len(set(host_domains.keys()) & set(host_not_domains.keys())) == 0
            assert len(set(host_domains.values()) & set(host_not_domains.values())) == 0


def test_all_domains():
    """all_domains is the per-host union of domains and not_domains."""
    with config.ConfigBuilder(browser_host="foo.bar",
                              alternate_hosts={"alt": "foo2.bar"},
                              subdomains={"a", "b"},
                              not_subdomains={"x", "y"}) as c:
        all_domains = c.all_domains
        assert all_domains == {
            "": {
                "": "foo.bar",
                "a": "a.foo.bar",
                "b": "b.foo.bar",
                "x": "x.foo.bar",
                "y": "y.foo.bar",
            },
            "alt": {
                "": "foo2.bar",
                "a": "a.foo2.bar",
                "b": "b.foo2.bar",
                "x": "x.foo2.bar",
                "y": "y.foo2.bar",
            },
        }


def test_domains_set():
    """domains_set flattens the domains tables into a set of FQDNs."""
    with config.ConfigBuilder(browser_host="foo.bar",
                              alternate_hosts={"alt": "foo2.bar"},
                              subdomains={"a", "b"},
                              not_subdomains={"x", "y"}) as c:
        domains_set = c.domains_set
        assert domains_set == {
            "foo.bar",
            "a.foo.bar",
            "b.foo.bar",
            "foo2.bar",
            "a.foo2.bar",
            "b.foo2.bar",
        }


def test_not_domains_set():
    """not_domains_set flattens the not_domains tables into a set of FQDNs."""
    with config.ConfigBuilder(browser_host="foo.bar",
                              alternate_hosts={"alt": "foo2.bar"},
                              subdomains={"a", "b"},
                              not_subdomains={"x", "y"}) as c:
        not_domains_set = c.not_domains_set
        assert not_domains_set == {
            "x.foo.bar",
            "y.foo.bar",
            "x.foo2.bar",
            "y.foo2.bar",
        }


def test_all_domains_set():
    """all_domains_set holds every resolvable and unresolvable FQDN."""
    with config.ConfigBuilder(browser_host="foo.bar",
                              alternate_hosts={"alt": "foo2.bar"},
                              subdomains={"a", "b"},
                              not_subdomains={"x", "y"}) as c:
        all_domains_set = c.all_domains_set
        assert all_domains_set == {
            "foo.bar",
            "a.foo.bar",
            "b.foo.bar",
            "x.foo.bar",
            "y.foo.bar",
            "foo2.bar",
            "a.foo2.bar",
            "b.foo2.bar",
            "x.foo2.bar",
            "y.foo2.bar",
        }
def test_ssl_env_none():
    """ssl type 'none' yields no ssl_config."""
    with config.ConfigBuilder(ssl={"type": "none"}) as c:
        assert c.ssl_config is None


def test_ssl_env_openssl():
    """Placeholder: enabling the body below would actually launch OpenSSL."""
    # TODO: this currently actually tries to start OpenSSL, which isn't ideal
    # with config.ConfigBuilder(ssl={"type": "openssl", "openssl": {"openssl_binary": "foobar"}}) as c:
    #     assert c.ssl_env is not None
    #     assert c.ssl_env.ssl_enabled is True
    #     assert c.ssl_env.binary == "foobar"
    pass


def test_ssl_env_bogus():
    """An unknown ssl type raises ValueError when the config is built."""
    with pytest.raises(ValueError):
        with config.ConfigBuilder(ssl={"type": "foobar"}):
            pass


def test_pickle():
    # Ensure that the config object can be pickled
    with config.ConfigBuilder() as c:
        pickle.dumps(c)
| mpl-2.0 |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim/configs/estimation_config_for_model_members.py | 2 | 2100 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.configs.base_configuration import AbstractUrbansimConfiguration
from urbansim.configs.estimation_base_config import EstimationBaseConfig
from urbansim.estimation.estimator import update_controller_by_specification_from_module
from opus_core.configuration import Configuration
class model_member_configuration:
    """Estimation-configuration helper for one member (``type``) of a grouped
    UrbanSim model.

    Fix: removed stray dataset residue (``| gpl-2.0 |``) that had been fused
    onto the final ``return`` statement, which made the module unparseable.
    """

    def __init__(self, model_name, type, add_member_prefix=False, base_configuration=AbstractUrbansimConfiguration):
        """
        model_name -- name of the grouped model (e.g. a location choice model)
        type -- name of the group member being estimated
        add_member_prefix -- when True, ``model_name`` becomes "<type>_<model_name>"
        base_configuration -- configuration class used as the estimation base
        """
        self.type = type
        self.model_name = model_name
        self.member_prefix_added = add_member_prefix
        # Keep the un-prefixed name: it identifies the model *group* in the
        # "models" entry regardless of any prefixing below.
        self.model_group = model_name
        if add_member_prefix:
            self.model_name = "%s_%s" % (self.type, self.model_name)
        self.base_configuration = base_configuration

    def get_configuration(self):
        """Return the full estimation configuration: the shared estimation
        base merged with this member's local settings."""
        run_configuration = EstimationBaseConfig(base_configuration=self.base_configuration)
        local_configuration = self.get_local_configuration()
        run_configuration.merge(local_configuration)
        return run_configuration

    def get_local_configuration(self):
        """Return the member-specific part of the configuration: which group
        member to estimate and the fully-qualified model name."""
        run_configuration = {}
        run_configuration["models"] = [
            {self.model_group: {"group_members": [{self.type: ["estimate"]}]}}
        ]
        # If the prefix was added in __init__, model_name is already
        # "<type>_<name>"; otherwise qualify it here.
        if self.member_prefix_added:
            run_configuration["model_name"] = self.model_name
        else:
            run_configuration["model_name"] = "%s_%s" % (self.type, self.model_name)
        return Configuration(run_configuration)

    def get_updated_configuration_from_module(self, run_configuration, specification_module=None):
        """Load the estimation specification from ``specification_module`` and
        wire it into the model controller's prepare_for_estimate arguments."""
        run_configuration = update_controller_by_specification_from_module(
            run_configuration, self.model_name, specification_module)
        run_configuration["models_configuration"][self.model_name]["controller"]["prepare_for_estimate"]["arguments"]["specification_dict"] = "spec['%s']" % self.type
        return run_configuration
mdhaman/superdesk-aap | server/aap/macros/reuters_derive_dateline_test.py | 2 | 8056 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.tests import TestCase
from .reuters_derive_dateline import reuters_derive_dateline
import datetime
class ReutersDeriveDatelineTests(TestCase):
    """Tests for deriving an item's dateline city from a Reuters body prefix."""

    def test_simple_case(self):
        """A plain 'CITY (Reuters) -' body prefix yields that city."""
        item = dict()
        item['firstcreated'] = datetime.datetime(2015, 10, 26, 11, 45, 19, 0)
        item['body_html'] = '<p>DETROIT (Reuters) - General Motors Co <GM.N> Chief Financial Officer Chuck Stevens \
        said on Wednesday the macroeconomic challenges in Brazil will remain in the near term but the company \
        has \"huge upside leverage once the macro situation changes\" in South America\'s largest \
        economy.</p>\n<p>GM\'s car sales so far in October are up versus a year ago, Stevens said to reporters \
        after the No. 1 U.S. automaker reported third-quarter financial results.</p>\n<p>Stevens also \
        reaffirmed GM\'s past forecasts that it will show profit in Europe in 2016. It would be GM\'s first \
        profit in Europe since 1999.</p>\n<p> (Reporting by Bernie Woodall and Joseph White; \
        Editing by Chizu Nomiyamam and Jeffrey Benkoe)</p>'
        reuters_derive_dateline(item)
        self.assertEqual(item['dateline']['located']['city'], 'Detroit')

    def test_with_a_date(self):
        """A 'CITY, <date> (Reuters)' prefix still yields the city."""
        item = dict()
        item['firstcreated'] = datetime.datetime(2015, 10, 26, 11, 45, 19, 0)
        item['body_html'] = '<p>PARIS, Oct 22 (Reuters) - Eurotunnel said on\nThursday that third-quarter revenue \
        rose 3 percent to 334.4\nmillion euros ($379.48 million), as economic recovery helped\noffset the \
        impact of the disruption to traffic resulting from\nthe migrant crisis.</p>\n<p>The operator of the \
        Channel Tunnel linking France and\nBritain said that business remained dynamic, driven by a\nrecovering \
        economy in Britain and to a lesser extent in the\neuro-zone.</p>\n<p>But a camp of around 6,000 migrants \
        in the Calais area\nfleeing war, political turmoil and poverty outside Europe has\ncaused disruption to \
        traffic since Summer.</p>\n<p>Eurotunnel carries Eurostar high-speed trains between Paris,\nBrussels and \
        London, as well as shuttle trains containing\npassenger cars, coaches and freight trucks.</p>\n<p>Rail \
        freight tonnage fell 27 percent year-on-year and the\nnumber of freight trains using the Channel \
        tunnel fell 33\npercent, the company said - blaming the drop on the migrant\ncrisis.</p>\n<p>Passenger \
        traffic in the quarter rose 2 percent year-on-year\nto 2,866,155 on the Eurostar. Traffic however fell \
        1 percent on\ntrucks and 9 percent on coaches compared with the same period\nlast year.</p>\n<p>In July \
        Eurotunnel asked the French and British governments\nto reimburse it for close to 10 million euros it spent \
        to beef\nup security to cope with a migrant crisis at the French port of\nCalais.</p>\n<p>Third quarter \
        sales figures no longer include MyFerryLink,\nthe ferry service between Britain and France, which ended \
        its \nits activity on June 29.</p>\n<p>($1 = 0.8812 euros)\n\n (Reporting by Dominique Vidalon; Editing \
        by Andrew Callus)</p>",'
        reuters_derive_dateline(item)
        self.assertEqual(item['dateline']['located']['city'], 'PARIS')

    def test_with_a_byline(self):
        """A leading byline paragraph does not prevent dateline extraction."""
        item = dict()
        item['firstcreated'] = datetime.datetime(2015, 10, 26, 11, 45, 19, 0)
        item['byline'] = 'By Karl Plume'
        item['body_html'] = '<p>By Karl Plume</p>\n<p>CHICAGO, Oct 21 (Reuters) - Chicago Cubs supporters have \
        uttered the phrase \"wait till next year\" perhaps more than any other fans in baseball, with their team\'s \
        championship drought stretching to 107 years after being swept from this year\'s playoffs by the New York \
        Mets.</p>\n<p>However, the 2015 Cubs have given Chicago\'s north-side faithful reason to believe that \
        their wait for a title, the longest in U.S. professional sports, might soon come to an end.</p>\n<p>The \
        Mets ensured the Cubs\' unprecedented streak will continue another year with an 8-3 victory on Wednesday \
        that saw them capture the National League pennant and claim a place in the World Series against \
        Kansas City or Toronto.</p>\n<p>While the Cubs\' clubhouse was disappointed after the defeat, there were \
        real signs of hope.</p>'
        reuters_derive_dateline(item)
        self.assertEqual(item['dateline']['located']['city'], 'Chicago')

    def test_with_a_dateline_already_leave_it_alone(self):
        """An item that already carries a dateline is never overwritten."""
        item = dict()
        item['firstcreated'] = datetime.datetime(2015, 10, 26, 11, 45, 19, 0)
        item['dateline'] = {'located': {'city': 'Chicargo'}}
        item['body_html'] = '<p>DONT CARE (Reuters) - Chicago Cubs supporters have \
        uttered the phrase \"wait till next year\" perhaps more than any other fans in baseball, with their team\'s \
        championship drought stretching to 107 years after being swept from this year\'s playoffs by the New York \
        Mets.</p>\n<p>However, the 2015 Cubs have given Chicago\'s north-side faithful reason to believe that \
        their wait for a title, the longest in U.S. professional sports, might soon come to an end.</p>\n<p>The \
        Mets ensured the Cubs\' unprecedented streak will continue another year with an 8-3 victory on Wednesday \
        that saw them capture the National League pennant and claim a place in the World Series against \
        Kansas City or Toronto.</p>\n<p>While the Cubs\' clubhouse was disappointed after the defeat, there were \
        real signs of hope.</p>'
        reuters_derive_dateline(item)
        self.assertEqual(item['dateline']['located']['city'], 'Chicargo')

    def test_with_just_a_date(self):
        """A prefix containing only a date (no city) yields no dateline."""
        item = dict()
        item['firstcreated'] = datetime.datetime(2015, 10, 26, 11, 45, 19, 0)
        item['body_html'] = '<p>Oct 22 (Reuters) - Eurotunnel said on\nThursday that third-quarter revenue \
        rose 3 percent to 334.4\nmillion euros ($379.48 million), as economic recovery helped\noffset the \
        impact of the disruption to traffic resulting from\nthe migrant crisis.</p>\n<p>The operator of the \
        Channel Tunnel linking France and\nBritain said that business remained dynamic, driven by a\nrecovering \
        economy in Britain and to a lesser extent in the\neuro-zone.</p>'
        reuters_derive_dateline(item)
        self.assertNotIn('dateline', item)

    def test_from_bangalore(self):
        """A Bangalore service dateline is replaced by the body's city."""
        item = {'dateline': {'located': {'city': 'Bangalore'}}}
        item['firstcreated'] = datetime.datetime(2015, 10, 26, 11, 45, 19, 0)
        item['body_html'] = '<p>Wagga Wagga (Reuters) - Chicago Cubs supporters have \
        uttered the phrase \"wait till next year\" perhaps more than any other fans in baseball, with their team\'s \
        championship drought stretching to 107 years after being swept from this year\'s playoffs by the New York \
        Mets.</p>\n<p>However, the 2015 Cubs have given Chicago\'s north-side faithful reason to believe that \
        their wait for a title, the longest in U.S. professional sports, might soon come to an end.</p>\n<p>The \
        Mets ensured the Cubs\' unprecedented streak will continue another year with an 8-3 victory on Wednesday \
        that saw them capture the National League pennant and claim a place in the World Series against \
        Kansas City or Toronto.</p>\n<p>While the Cubs\' clubhouse was disappointed after the defeat, there were \
        real signs of hope.</p>'
        reuters_derive_dateline(item)
        self.assertEqual(item['dateline']['located']['city'], 'Wagga Wagga')
| agpl-3.0 |
doselect/django-tastypie | tastypie/fields.py | 4 | 34355 | from __future__ import unicode_literals
import datetime
from dateutil.parser import parse
import decimal
from decimal import Decimal
import importlib
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import models
try:
from django.db.models.fields.related import\
SingleRelatedObjectDescriptor as ReverseOneToOneDescriptor
except ImportError:
from django.db.models.fields.related_descriptors import\
ReverseOneToOneDescriptor
from django.utils import datetime_safe, six
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.utils import dict_strip_unicode_keys, make_aware
class NOT_PROVIDED:
    """Sentinel type used to detect that no default value was supplied."""

    def __str__(self):
        # Human-readable marker, e.g. when rendered in schema output.
        message = 'No default provided.'
        return message
# All the ApiField variants.
class ApiField(object):
"The base implementation of a field used by the resources."
is_m2m = False
is_related = False
dehydrated_type = 'string'
help_text = ''
    def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None, use_in='all', verbose_name=None):
        """
        Sets up the field. This is generally called when the containing
        ``Resource`` is initialized.

        Optionally accepts an ``attribute``, which should be a string of
        either an instance attribute or callable off the object during the
        ``dehydrate`` or push data onto an object during the ``hydrate``.
        Defaults to ``None``, meaning data will be manually accessed.

        Optionally accepts a ``default``, which provides default data when the
        object being ``dehydrated``/``hydrated`` has no data on the field.
        Defaults to ``NOT_PROVIDED``.

        Optionally accepts a ``null``, which indicates whether or not a
        ``None`` is allowable data on the field. Defaults to ``False``.

        Optionally accepts a ``blank``, which indicates whether or not
        data may be omitted on the field. Defaults to ``False``.

        Optionally accepts a ``readonly``, which indicates whether the field
        is used during the ``hydrate`` or not. Defaults to ``False``.

        Optionally accepts a ``unique``, which indicates if the field is a
        unique identifier for the object.

        Optionally accepts ``help_text``, which lets you provide a
        human-readable description of the field exposed at the schema level.
        Defaults to the per-Field definition.

        Optionally accepts ``use_in``. This may be one of ``list``, ``detail``,
        ``all`` or a callable which accepts a ``bundle`` and returns
        ``True`` or ``False``. Indicates whether this field will be included
        during dehydration of a list of objects or a single object. If
        ``use_in`` is a callable, and returns ``True``, the field will be
        included during dehydration.
        Defaults to ``all``.

        Optionally accepts ``verbose_name``, which lets you provide a
        more verbose name of the field exposed at the schema level.
        """
        # Track what the index thinks this field is called.
        self.instance_name = None
        self._resource = None
        self.attribute = attribute
        # Check for `__` in the field for looking through the relation.
        self._attrs = attribute.split('__') if attribute is not None and isinstance(attribute, six.string_types) else []
        self._default = default
        self.null = null
        self.blank = blank
        self.readonly = readonly
        self.unique = unique
        # Invalid ``use_in`` values silently fall back to 'all'.
        self.use_in = 'all'
        if use_in in ['all', 'detail', 'list'] or callable(use_in):
            self.use_in = use_in
        self.verbose_name = verbose_name
        if help_text:
            self.help_text = help_text
def contribute_to_class(self, cls, name):
# Do the least we can here so that we don't hate ourselves in the
# morning.
self.instance_name = name
self._resource = cls
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
    def dehydrate(self, bundle, for_list=True):
        """
        Takes data from the provided object and prepares it for the
        resource.

        ``bundle.obj`` is the object being presented.  ``for_list`` is
        accepted for signature parity with related fields but unused here.
        """
        if self.attribute is not None:
            current_object = bundle.obj
            # Walk the (possibly ``__``-separated) attribute path one hop at
            # a time; ``previous_object`` is kept only for the error message.
            for attr in self._attrs:
                previous_object = current_object
                current_object = getattr(current_object, attr, None)
                if current_object is None:
                    if self.has_default():
                        current_object = self._default
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    elif self.null:
                        current_object = None
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    else:
                        raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr))
            # A callable endpoint (e.g. a model method or a callable default)
            # supplies the value at dehydrate time.
            if callable(current_object):
                current_object = current_object()
            return self.convert(current_object)
        # No attribute configured: fall back to the default, if any.
        if self.has_default():
            return self.convert(self.default)
        else:
            return None
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
    def hydrate(self, bundle):
        """
        Takes data stored in the bundle for the field and returns it. Used for
        taking simple data and building a instance object.

        When no inbound value is present, falls back (in order) to: a parent
        related object, ``blank``, the existing value on ``bundle.obj``, the
        default, then ``null`` -- otherwise raises ``ApiFieldError``.
        """
        # Read-only fields never accept inbound data.
        if self.readonly:
            return None
        if self.instance_name not in bundle.data:
            if self.is_related and not self.is_m2m:
                # We've got an FK (or alike field) & a possible parent object.
                # Check for it.
                if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
                    return bundle.related_obj
            if self.blank:
                return None
            if self.attribute:
                # Reuse whatever value is already on the object, if any.
                try:
                    val = getattr(bundle.obj, self.attribute, None)
                    if val is not None:
                        return val
                except ObjectDoesNotExist:
                    pass
            if self.instance_name:
                try:
                    if hasattr(bundle.obj, self.instance_name):
                        return getattr(bundle.obj, self.instance_name)
                except ObjectDoesNotExist:
                    pass
            if self.has_default():
                # Callables are invoked lazily, per request.
                if callable(self._default):
                    return self._default()
                return self._default
            if self.null:
                return None
            raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)
        return bundle.data[self.instance_name]
class CharField(ApiField):
    """
    A free-form text field of arbitrary length.

    Covers both ``models.CharField`` and ``models.TextField``.
    """
    dehydrated_type = 'string'
    help_text = 'Unicode string data. Ex: "Hello World"'

    def convert(self, value):
        """Coerce any non-``None`` value to text."""
        return None if value is None else six.text_type(value)
class FileField(ApiField):
    """
    A field representing an uploaded file, exposed as its URL.

    Covers both ``models.FileField`` and ``models.ImageField``.
    """
    dehydrated_type = 'string'
    help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"'

    def convert(self, value):
        if value is None:
            return None
        try:
            # ``File``-like objects expose ``url``; plain strings (overridden
            # or default values) pass through unchanged.
            url = getattr(value, 'url', value)
        except ValueError:
            # A file with no backing storage raises ValueError on ``url``;
            # treat that as "no value".
            return None
        return url
class IntegerField(ApiField):
    """
    An integer field.

    Covers ``models.IntegerField``, ``models.PositiveIntegerField``,
    ``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.
    """
    dehydrated_type = 'integer'
    help_text = 'Integer data. Ex: 2673'

    def convert(self, value):
        """Coerce any non-``None`` value to ``int``."""
        return None if value is None else int(value)
class FloatField(ApiField):
    """
    A floating point field.
    """
    dehydrated_type = 'float'
    help_text = 'Floating point numeric data. Ex: 26.73'

    def convert(self, value):
        """Coerce any non-``None`` value to ``float``."""
        return None if value is None else float(value)
class DecimalField(ApiField):
    """
    A fixed-precision decimal field.
    """
    dehydrated_type = 'decimal'
    help_text = 'Fixed precision numeric data. Ex: 26.73'

    def convert(self, value):
        """Coerce any non-``None`` value to ``Decimal``."""
        return None if value is None else Decimal(value)

    def hydrate(self, bundle):
        value = super(DecimalField, self).hydrate(bundle)
        # Only truthy, non-Decimal values get converted; falsy values (None,
        # empty string) pass through untouched.
        if not value or isinstance(value, Decimal):
            return value
        try:
            return Decimal(value)
        except decimal.InvalidOperation:
            raise ApiFieldError("Invalid decimal string for '%s' field: '%s'" % (self.instance_name, value))
class BooleanField(ApiField):
    """
    A boolean field.

    Covers both ``models.BooleanField`` and ``models.NullBooleanField``.
    """
    dehydrated_type = 'boolean'
    help_text = 'Boolean data. Ex: True'

    def convert(self, value):
        """Coerce any non-``None`` value to ``bool``."""
        return None if value is None else bool(value)
class ListField(ApiField):
    """
    A list field.
    """
    dehydrated_type = 'list'
    help_text = "A list of data. Ex: ['abc', 26.73, 8]"

    def convert(self, value):
        """Coerce any non-``None`` iterable to a ``list``."""
        return None if value is None else list(value)
class DictField(ApiField):
    """
    A dictionary field.
    """
    dehydrated_type = 'dict'
    help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"

    def convert(self, value):
        """Coerce any non-``None`` mapping to a ``dict``."""
        return None if value is None else dict(value)
class DateField(ApiField):
    """
    A date field.

    Accepts either date-like objects (passed through) or "YYYY-MM-DD"
    strings, which are parsed into ``datetime_safe.date`` instances.
    """
    dehydrated_type = 'date'
    help_text = 'A date as a string. Ex: "2010-11-10"'
    def convert(self, value):
        if value is None:
            return None
        if isinstance(value, six.string_types):
            try:
                # Only the first 10 characters matter: "YYYY-MM-DD".
                year, month, day = value[:10].split('-')
                return datetime_safe.date(int(year), int(month), int(day))
            except ValueError:
                raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
        return value
    def hydrate(self, bundle):
        value = super(DateField, self).hydrate(bundle)
        # Only convert values that are not yet date-like (no ``year`` attr).
        if value and not hasattr(value, 'year'):
            try:
                # Try to rip a date/datetime out of it.
                value = make_aware(parse(value))
                # dateutil yields datetimes; drop the time component.
                if hasattr(value, 'hour'):
                    value = value.date()
            except ValueError:
                # NOTE(review): unparseable input silently passes through
                # unchanged here -- presumably later validation rejects it;
                # confirm this leniency is intentional.
                pass
        return value
class DateTimeField(ApiField):
    """
    A datetime field.

    Accepts either datetime-like objects (passed through) or strings of the
    form ``YYYY-MM-DD<sep>HH:MM:SS`` where ``<sep>`` is ``T`` or a space.
    """
    dehydrated_type = 'datetime'
    help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"'

    def convert(self, value):
        if value is None:
            return None
        if isinstance(value, six.string_types):
            try:
                year, month, day = value[:10].split('-')
                # BUGFIX: the time portion starts at index 11 -- index 10 is
                # the "T"/space separator.  The previous slice [10:18]
                # swallowed the separator into the hour and truncated the
                # seconds to one digit, so the documented
                # "YYYY-MM-DDTHH:MM:SS" format was rejected outright.
                hour, minute, second = value[11:19].split(':')
                return make_aware(datetime_safe.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)))
            except ValueError:
                # Covers both bad int() conversions and wrong field counts.
                raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
        return value

    def hydrate(self, bundle):
        """Parse inbound string data into an aware ``datetime``."""
        value = super(DateTimeField, self).hydrate(bundle)
        if value and not hasattr(value, 'year'):
            if isinstance(value, six.string_types):
                try:
                    # Try to rip a date/datetime out of it.
                    value = make_aware(parse(value))
                except (ValueError, TypeError):
                    raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
            else:
                raise ApiFieldError("Datetime provided to '%s' field must be a string: %s" % (self.instance_name, value))
        return value
class RelatedField(ApiField):
    """
    Provides access to data that is related within the database.
    The ``RelatedField`` base class is not intended for direct use but provides
    functionality that ``ToOneField`` and ``ToManyField`` build upon.
    The contents of this field actually point to another ``Resource``,
    rather than the related object. This allows the field to represent its data
    in different ways.
    The abstractions based around this are "leaky" in that, unlike the other
    fields provided by ``tastypie``, these fields don't handle arbitrary objects
    very well. The subclasses use Django's ORM layer to make things go, though
    there is no ORM-specific code at this level.
    """
    dehydrated_type = 'related'
    is_related = True
    help_text = 'A related resource. Can be either a URI or set of nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None, use_in='all', verbose_name=None, full_list=True, full_detail=True):
        """
        Builds the field and prepares it to access to related data.
        The ``to`` argument should point to a ``Resource`` class, NOT
        to a ``Model``. Required.
        The ``attribute`` argument should specify what field/callable points to
        the related data on the instance object. Required.
        Optionally accepts a ``related_name`` argument. Currently unused, as
        unlike Django's ORM layer, reverse relations between ``Resource``
        classes are not automatically created. Defaults to ``None``.
        Optionally accepts a ``null``, which indicates whether or not a
        ``None`` is allowable data on the field. Defaults to ``False``.
        Optionally accepts a ``blank``, which indicates whether or not
        data may be omitted on the field. Defaults to ``False``.
        Optionally accepts a ``readonly``, which indicates whether the field
        is used during the ``hydrate`` or not. Defaults to ``False``.
        Optionally accepts a ``full``, which indicates how the related
        ``Resource`` will appear post-``dehydrate``. If ``False``, the
        related ``Resource`` will appear as a URL to the endpoint of that
        resource. If ``True``, the result of the sub-resource's
        ``dehydrate`` will be included in full.
        Optionally accepts a ``unique``, which indicates if the field is a
        unique identifier for the object.
        Optionally accepts ``help_text``, which lets you provide a
        human-readable description of the field exposed at the schema level.
        Defaults to the per-Field definition.
        Optionally accepts ``use_in``. This may be one of ``list``, ``detail``
        ``all`` or a callable which accepts a ``bundle`` and returns
        ``True`` or ``False``. Indicates whether this field will be included
        during dehydration of a list of objects or a single object. If ``use_in``
        is a callable, and returns ``True``, the field will be included during
        dehydration.
        Defaults to ``all``.
        Optionally accepts ``verbose_name``, which lets you provide a
        more verbose name of the field exposed at the schema level.
        Optionally accepts a ``full_list``, which indicates whether or not
        data should be fully dehydrated when the request is for a list of
        resources. Accepts ``True``, ``False`` or a callable that accepts
        a bundle and returns ``True`` or ``False``. Depends on ``full``
        being ``True``. Defaults to ``True``.
        Optionally accepts a ``full_detail``, which indicates whether or not
        data should be fully dehydrated when then request is for a single
        resource. Accepts ``True``, ``False`` or a callable that accepts a
        bundle and returns ``True`` or ``False``.Depends on ``full``
        being ``True``. Defaults to ``True``.
        """
        super(RelatedField, self).__init__(attribute=attribute, default=default, null=null, blank=blank, readonly=readonly, unique=unique, help_text=help_text, use_in=use_in, verbose_name=verbose_name)
        self.related_name = related_name
        self.to = to
        # Lazily resolved target Resource class (see ``to_class``) and a
        # per-related-class cache of instantiated resources.
        self._to_class = None
        self._rel_resources = {}
        self.full = full
        # Normalize full_list/full_detail into callables taking a bundle.
        self.full_list = full_list if callable(full_list) else lambda bundle: full_list
        self.full_detail = full_detail if callable(full_detail) else lambda bundle: full_detail
        self.api_name = None
        self.resource_name = None
    def get_related_resource(self, related_instance):
        """
        Instantiates (and caches) the related resource for an instance's class.
        """
        related_class = type(related_instance)
        if related_class in self._rel_resources:
            return self._rel_resources[related_class]
        related_resource = self.to_class()
        # Fix the ``api_name`` if it's not present.
        if related_resource._meta.api_name is None:
            if self._resource and self._resource._meta.api_name is not None:
                related_resource._meta.api_name = self._resource._meta.api_name
        self._rel_resources[related_class] = related_resource
        return related_resource
    @property
    def to_class(self):
        # We need to be lazy here, because when the metaclass constructs the
        # Resources, other classes may not exist yet.
        # That said, memoize this so we never have to relookup/reimport.
        if self._to_class:
            return self._to_class
        if not isinstance(self.to, six.string_types):
            self._to_class = self.to
            return self._to_class
        # Check if we're self-referential and hook it up.
        # We can't do this quite like Django because there's no ``AppCache``
        # here (which I think we should avoid as long as possible).
        if self.to == 'self':
            self._to_class = self._resource
            return self._to_class
        # It's a string. Let's figure it out.
        if '.' in self.to:
            # Try to import.
            module_bits = self.to.split('.')
            module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]
            module = importlib.import_module(module_path)
        else:
            # We've got a bare class name here, which won't work (No AppCache
            # to rely on). Try to throw a useful error.
            raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
        self._to_class = getattr(module, class_name, None)
        if self._to_class is None:
            raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name))
        return self._to_class
    def dehydrate_related(self, bundle, related_resource, for_list=True):
        """
        Based on the ``full_resource``, returns either the endpoint or the data
        from ``full_dehydrate`` for the related resource.
        """
        should_dehydrate_full_resource = self.should_full_dehydrate(bundle, for_list=for_list)
        if not should_dehydrate_full_resource:
            # Be a good netizen.
            return related_resource.get_resource_uri(bundle)
        else:
            # ZOMG extra data and big payloads.
            bundle = related_resource.build_bundle(
                obj=bundle.obj,
                request=bundle.request,
                objects_saved=bundle.objects_saved
            )
            return related_resource.full_dehydrate(bundle)
    def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
        """
        Given a URI is provided, the related resource is attempted to be
        loaded based on the identifiers in the URI.
        """
        err_msg = "Could not find the provided %s object via resource URI '%s'." % (fk_resource._meta.resource_name, uri,)
        if not uri:
            raise ApiFieldError(err_msg)
        try:
            obj = fk_resource.get_via_uri(uri, request=request)
            bundle = fk_resource.build_bundle(
                obj=obj,
                request=request,
                via_uri=True
            )
            return fk_resource.full_dehydrate(bundle)
        except ObjectDoesNotExist:
            raise ApiFieldError(err_msg)
    def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):
        """
        Given a dictionary-like structure is provided, a fresh related
        resource is created using that data.
        """
        # Try to hydrate the data provided.
        data = dict_strip_unicode_keys(data)
        obj = None
        # An explicit resource_uri in the payload pins the target object.
        if getattr(fk_resource._meta, 'include_resource_uri', True) and 'resource_uri' in data:
            uri = data['resource_uri']
            err_msg = "Could not find the provided %s object via resource URI '%s'." % (fk_resource._meta.resource_name, uri,)
            try:
                obj = fk_resource.get_via_uri(uri, request=request)
            except ObjectDoesNotExist:
                raise ApiFieldError(err_msg)
        fk_bundle = fk_resource.build_bundle(
            data=data,
            obj=obj,
            request=request
        )
        if related_obj:
            fk_bundle.related_obj = related_obj
            fk_bundle.related_name = related_name
        # Keys usable for a unique lookup: the pk plus any declared-unique
        # fields present in the payload.
        unique_keys = {
            k: v
            for k, v in data.items()
            if k == 'pk' or (hasattr(fk_resource, k) and getattr(fk_resource, k).unique)
        }
        # If we have no unique keys, we shouldn't go look for some resource that
        # happens to match other kwargs. In the case of a create, it might be the
        # completely wrong resource.
        # We also need to check to see if updates are allowed on the FK resource.
        if not obj and unique_keys:
            try:
                fk_resource.obj_get(fk_bundle, skip_errors=True, **data)
            except (ObjectDoesNotExist, NotFound, TypeError):
                try:
                    # Attempt lookup by primary key
                    fk_resource.obj_get(fk_bundle, skip_errors=True, **unique_keys)
                except (ObjectDoesNotExist, NotFound):
                    pass
            except MultipleObjectsReturned:
                pass
        # If we shouldn't update a resource, or we couldn't find a matching
        # resource we'll just return a populated bundle instead
        # of mistakenly updating something that should be read-only.
        fk_bundle = fk_resource.full_hydrate(fk_bundle)
        fk_resource.is_valid(fk_bundle)
        return fk_bundle
    def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):
        """
        Given an object with a ``pk`` attribute, the related resource
        is attempted to be loaded via that PK.
        """
        bundle = fk_resource.build_bundle(
            obj=obj,
            request=request
        )
        return fk_resource.full_dehydrate(bundle)
    def build_related_resource(self, value, request=None, related_obj=None, related_name=None):
        """
        Returns a bundle of data built by the related resource, usually via
        ``hydrate`` with the data provided.
        Accepts either a URI, a data dictionary (or dictionary-like structure)
        or an object with a ``pk``.
        """
        fk_resource = self.to_class()
        kwargs = {
            'request': request,
            'related_obj': related_obj,
            'related_name': related_name,
        }
        if isinstance(value, Bundle):
            # Already hydrated, probably nested bundles. Just return.
            return value
        elif isinstance(value, six.string_types):
            # We got a URI. Load the object and assign it.
            return self.resource_from_uri(fk_resource, value, **kwargs)
        elif isinstance(value, dict):
            # We've got a data dictionary.
            # Since this leads to creation, this is the only one of these
            # methods that might care about "parent" data.
            return self.resource_from_data(fk_resource, value, **kwargs)
        elif hasattr(value, 'pk'):
            # We've got an object with a primary key.
            return self.resource_from_pk(fk_resource, value, **kwargs)
        else:
            raise ApiFieldError("The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value))
    def should_full_dehydrate(self, bundle, for_list):
        """
        Based on the ``full``, ``list_full`` and ``detail_full`` returns ``True`` or ``False``
        indicating whether the resource should be fully dehydrated.
        """
        should_dehydrate_full_resource = False
        if self.full:
            is_details_view = not for_list
            if is_details_view:
                if self.full_detail(bundle):
                    should_dehydrate_full_resource = True
            else:
                if self.full_list(bundle):
                    should_dehydrate_full_resource = True
        return should_dehydrate_full_resource
class ToOneField(RelatedField):
    """
    Provides access to related data via foreign key.
    This subclass requires Django's ORM layer to work properly.
    """
    help_text = 'A single related resource. Can be either a URI or set of nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
                 null=False, blank=False, readonly=False, full=False,
                 unique=False, help_text=None, use_in='all', verbose_name=None,
                 full_list=True, full_detail=True):
        super(ToOneField, self).__init__(
            to, attribute, related_name=related_name, default=default,
            null=null, blank=blank, readonly=readonly, full=full,
            unique=unique, help_text=help_text, use_in=use_in,
            verbose_name=verbose_name, full_list=full_list,
            full_detail=full_detail
        )
    def contribute_to_class(self, cls, name):
        # Auto-derive ``related_name`` when writing through a reverse
        # one-to-one descriptor on the model.
        super(ToOneField, self).contribute_to_class(cls, name)
        if not self.related_name:
            related_field = getattr(self._resource._meta.object_class, self.attribute, None)
            if isinstance(related_field, ReverseOneToOneDescriptor):
                # This is the case when we are writing to a reverse one to one field.
                # Enable related name to make this work fantastically.
                # see https://code.djangoproject.com/ticket/18638 (bug; closed; worksforme)
                # and https://github.com/django-tastypie/django-tastypie/issues/566
                # this gets the related_name of the one to one field of our model
                self.related_name = related_field.related.field.name
    def dehydrate(self, bundle, for_list=True):
        """Resolve the foreign object and delegate to ``dehydrate_related``."""
        foreign_obj = None
        if callable(self.attribute):
            previous_obj = bundle.obj
            foreign_obj = self.attribute(bundle)
        elif isinstance(self.attribute, six.string_types):
            foreign_obj = bundle.obj
            for attr in self._attrs:
                previous_obj = foreign_obj
                try:
                    foreign_obj = getattr(foreign_obj, attr, None)
                except ObjectDoesNotExist:
                    foreign_obj = None
        # NOTE(review): if ``attribute`` is neither callable nor a string,
        # ``previous_obj``/``attr`` below are unbound -- presumably that
        # configuration never occurs; confirm.
        if not foreign_obj:
            if not self.null:
                if callable(self.attribute):
                    raise ApiFieldError("The related resource for resource %s could not be found." % (previous_obj))
                else:
                    raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
            return None
        fk_resource = self.get_related_resource(foreign_obj)
        fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
        return self.dehydrate_related(fk_bundle, fk_resource, for_list=for_list)
    def hydrate(self, bundle):
        """Build the related bundle from inbound data (URI, dict or pk-object)."""
        value = super(ToOneField, self).hydrate(bundle)
        if value is None:
            return value
        return self.build_related_resource(value, request=bundle.request)
class ForeignKey(ToOneField):
    """Alias of ``ToOneField`` for those who prefer ``django.db.models`` naming."""
class OneToOneField(ToOneField):
    """Alias of ``ToOneField`` for those who prefer ``django.db.models`` naming."""
class ToManyField(RelatedField):
    """
    Provides access to related data via a join table.
    This subclass requires Django's ORM layer to work properly.
    Note that the ``hydrate`` portions of this field are quite different than
    any other field. ``hydrate_m2m`` actually handles the data and relations.
    This is due to the way Django implements M2M relationships.
    """
    is_m2m = True
    help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
                 null=False, blank=False, readonly=False, full=False,
                 unique=False, help_text=None, use_in='all', verbose_name=None,
                 full_list=True, full_detail=True):
        super(ToManyField, self).__init__(
            to, attribute, related_name=related_name, default=default,
            null=null, blank=blank, readonly=readonly, full=full,
            unique=unique, help_text=help_text, use_in=use_in,
            verbose_name=verbose_name, full_list=full_list,
            full_detail=full_detail
        )
    def dehydrate(self, bundle, for_list=True):
        """Resolve the related queryset/iterable and dehydrate each member."""
        # An unsaved object cannot have M2M rows attached yet.
        if not bundle.obj or not bundle.obj.pk:
            if not self.null:
                raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj)
            return []
        the_m2ms = None
        previous_obj = bundle.obj
        attr = self.attribute
        if callable(self.attribute):
            the_m2ms = self.attribute(bundle)
        elif isinstance(self.attribute, six.string_types):
            the_m2ms = bundle.obj
            for attr in self._attrs:
                previous_obj = the_m2ms
                try:
                    the_m2ms = getattr(the_m2ms, attr, None)
                except ObjectDoesNotExist:
                    the_m2ms = None
                if not the_m2ms:
                    break
        if not the_m2ms:
            if not self.null:
                raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
            return []
        # A related manager must be turned into a queryset before iteration.
        if isinstance(the_m2ms, models.Manager):
            the_m2ms = the_m2ms.all()
        m2m_dehydrated = [
            self.dehydrate_related(
                Bundle(obj=m2m, request=bundle.request),
                self.get_related_resource(m2m),
                for_list=for_list
            )
            for m2m in the_m2ms
        ]
        return m2m_dehydrated
    def hydrate(self, bundle):
        # Intentionally a no-op: M2M data is handled by ``hydrate_m2m`` after
        # the parent object has been saved.
        pass
    def hydrate_m2m(self, bundle):
        """Build related bundles for each inbound M2M value."""
        if self.readonly:
            return None
        if bundle.data.get(self.instance_name) is None:
            if self.blank:
                return []
            if self.null:
                return []
            raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name)
        kwargs = {
            'request': bundle.request,
        }
        # Pass the parent along so reverse relations can be wired up.
        if self.related_name:
            kwargs['related_obj'] = bundle.obj
            kwargs['related_name'] = self.related_name
        return [
            self.build_related_resource(value, **kwargs)
            for value in bundle.data.get(self.instance_name)
            if value is not None
        ]
class ManyToManyField(ToManyField):
    """Alias of ``ToManyField`` for those who prefer ``django.db.models`` naming."""
class OneToManyField(ToManyField):
    """Alias of ``ToManyField`` for those who prefer ``django.db.models`` naming."""
class TimeField(ApiField):
    """
    A time-of-day field, parsed leniently via ``dateutil``.
    """
    dehydrated_type = 'time'
    help_text = 'A time as string. Ex: "20:05:23"'

    def dehydrate(self, obj, for_list=True):
        # Run convert() over the base result so string values stored on the
        # object come back as real ``datetime.time`` instances.
        return self.convert(super(TimeField, self).dehydrate(obj))

    def convert(self, value):
        if not isinstance(value, six.string_types):
            return value
        return self.to_time(value)

    def to_time(self, s):
        """Parse *s* into a ``datetime.time``; raise ``ApiFieldError`` on bad input."""
        try:
            parsed = parse(s)
        except (ValueError, TypeError) as e:
            raise ApiFieldError(str(e))
        return datetime.time(parsed.hour, parsed.minute, parsed.second, parsed.microsecond)

    def hydrate(self, bundle):
        value = super(TimeField, self).hydrate(bundle)
        # Truthy non-time values get parsed; falsy values pass through.
        if value and not isinstance(value, datetime.time):
            value = self.to_time(value)
        return value
| bsd-3-clause |
HolgerPeters/scikit-learn | doc/sphinxext/sphinx_gallery/backreferences.py | 23 | 6073 | # -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
========================
Backreferences Generator
========================
Reviews generated example files in order to keep track of used modules
"""
from __future__ import print_function
import ast
import os
# Try Python 2 first, otherwise load from Python 3
try:
import cPickle as pickle
except ImportError:
import pickle
class NameFinder(ast.NodeVisitor):
    """AST visitor recording imported names and every dotted access path.

    Only accesses that resolve back to an import are reported by
    :meth:`get_mapping`.
    """

    def __init__(self):
        super(NameFinder, self).__init__()
        # local alias -> fully qualified import path
        self.imported_names = {}
        # every bare name / attribute chain that the code reads
        self.accessed_names = set()

    def visit_Import(self, node, prefix=''):
        for alias in node.names:
            self.imported_names[alias.asname or alias.name] = prefix + alias.name

    def visit_ImportFrom(self, node):
        # ``from a.b import c`` is treated like ``import a.b.c``.
        self.visit_Import(node, node.module + '.')

    def visit_Name(self, node):
        self.accessed_names.add(node.id)

    def visit_Attribute(self, node):
        chain = []
        target = node
        while isinstance(target, ast.Attribute):
            chain.append(target.attr)
            target = target.value
        if not isinstance(target, ast.Name):
            # e.g. ``a().b`` -- keep walking to find names inside the call.
            self.visit(target)
            return
        chain.append(target.id)
        chain.reverse()
        self.accessed_names.add('.'.join(chain))

    def get_mapping(self):
        """Yield ``(name_as_written, fully_qualified_name)`` pairs."""
        for name in self.accessed_names:
            head, sep, tail = name.partition('.')
            if head in self.imported_names:
                # Join the resolved import path with the attribute remainder.
                yield name, self.imported_names[head] + sep + tail
def get_short_module_name(module_name, obj_name):
    """Return the shortest dotted prefix of *module_name* exporting *obj_name*."""
    parts = module_name.split('.')
    shortest = module_name
    i = len(parts) - 1
    while i > 0:
        candidate = '.'.join(parts[:i])
        try:
            # libraries can throw all sorts of exceptions on import...
            exec('from %s import %s' % (candidate, obj_name))
        except Exception:
            # Import failed: keep the last working module name and stop.
            shortest = '.'.join(parts[:i + 1])
            break
        shortest = candidate
        i -= 1
    return shortest
def identify_names(code):
    """Builds a codeobj summary by identifying and resolving used names

    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))
    example_code_obj = {}
    for name, full_name in finder.get_mapping():
        # ``name`` is as written in the file (e.g. np.asarray); ``full_name``
        # includes the resolved import path (e.g. numpy.asarray).
        module, _, attribute = full_name.rpartition('.')
        if not module:
            # A bare module reference has no attribute to cross-link.
            continue
        example_code_obj[name] = {
            'name': attribute,
            'module': module,
            'module_short': get_short_module_name(module, attribute),
        }
    return example_code_obj
def scan_used_functions(example_file, gallery_conf):
    """save variables so we can later add links to the documentation

    Returns the set of ``module_short.name`` strings used by the example
    that belong to ``gallery_conf['doc_module']``, and persists the full
    mapping as a ``*_codeobj.pickle`` next to the example.
    """
    # Read the example source with a context manager; the original leaked
    # the handle (``open(...).read()``) until garbage collection.
    with open(example_file) as src:
        example_code_obj = identify_names(src.read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith(gallery_conf['doc_module']))
    return backrefs
# XXX This figure:: uses a forward slash even on Windows, but the op.join's
# elsewhere will use backslashes...
THUMBNAIL_TEMPLATE = """
.. raw:: html
<div class="sphx-glr-thumbcontainer" tooltip="{snippet}">
.. only:: html
.. figure:: /{thumbnail}
:ref:`sphx_glr_{ref_name}`
.. raw:: html
</div>
"""
BACKREF_THUMBNAIL_TEMPLATE = THUMBNAIL_TEMPLATE + """
.. only:: not html
* :ref:`sphx_glr_{ref_name}`
"""
def _thumbnail_div(full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb',
'sphx_glr_%s_thumb.png' % fname[:-3])
ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
return template.format(snippet=snippet, thumbnail=thumb, ref_name=ref_name)
def write_backreferences(seen_backrefs, gallery_conf,
                         target_dir, fname, snippet):
    """Writes down back reference files, which include a thumbnail list
    of examples using a certain module

    ``seen_backrefs`` is mutated: a module is added once its ``.examples``
    file has been (re)started during this build.
    """
    example_file = os.path.join(target_dir, fname)
    backrefs = scan_used_functions(example_file, gallery_conf)
    for backref in backrefs:
        include_path = os.path.join(gallery_conf['mod_example_dir'],
                                    '%s.examples' % backref)
        # First sighting this build: truncate ('w') and write a heading;
        # afterwards append ('a') thumbnails only.
        seen = backref in seen_backrefs
        with open(include_path, 'a' if seen else 'w') as ex_file:
            if not seen:
                heading = '\n\nExamples using ``%s``' % backref
                ex_file.write(heading + '\n')
                ex_file.write('^' * len(heading) + '\n')
            ex_file.write(_thumbnail_div(target_dir, fname, snippet,
                                         is_backref=True))
            seen_backrefs.add(backref)
| bsd-3-clause |
nrjcoin-project/p2pool | wstools/Namespaces.py | 292 | 9396 | # Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
"""Namespace module, so you don't need PyXML
"""
ident = "$Id$"
try:
    from xml.ns import SOAP, SCHEMA, WSDL, XMLNS, DSIG, ENCRYPTION
    DSIG.C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
# BUGFIX: catch ImportError instead of a bare ``except:``.  ``xml.ns`` only
# ever shipped with PyXML, so its absence is an ImportError; a bare except
# also swallowed KeyboardInterrupt/SystemExit and masked real errors raised
# while importing xml.ns itself.
except ImportError:
    class SOAP:
        # SOAP 1.1 envelope/encoding namespace URIs.
        ENV = "http://schemas.xmlsoap.org/soap/envelope/"
        ENC = "http://schemas.xmlsoap.org/soap/encoding/"
        ACTOR_NEXT = "http://schemas.xmlsoap.org/soap/actor/next"
    class SCHEMA:
        # XML Schema namespaces, one per W3C draft year; BASE is the 2001 REC.
        XSD1 = "http://www.w3.org/1999/XMLSchema"
        XSD2 = "http://www.w3.org/2000/10/XMLSchema"
        XSD3 = "http://www.w3.org/2001/XMLSchema"
        XSD_LIST = [ XSD1, XSD2, XSD3]
        XSI1 = "http://www.w3.org/1999/XMLSchema-instance"
        XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
        XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
        XSI_LIST = [ XSI1, XSI2, XSI3 ]
        BASE = XSD3
    class WSDL:
        BASE = "http://schemas.xmlsoap.org/wsdl/"
        BIND_HTTP = "http://schemas.xmlsoap.org/wsdl/http/"
        BIND_MIME = "http://schemas.xmlsoap.org/wsdl/mime/"
        BIND_SOAP = "http://schemas.xmlsoap.org/wsdl/soap/"
        BIND_SOAP12 = "http://schemas.xmlsoap.org/wsdl/soap12/"
    class XMLNS:
        BASE = "http://www.w3.org/2000/xmlns/"
        XML = "http://www.w3.org/XML/1998/namespace"
        HTML = "http://www.w3.org/TR/REC-html40"
    class DSIG:
        # XML Digital Signature algorithm identifiers.
        BASE = "http://www.w3.org/2000/09/xmldsig#"
        C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
        C14N_COMM = "http://www.w3.org/TR/2000/CR-xml-c14n-20010315#WithComments"
        C14N_EXCL = "http://www.w3.org/2001/10/xml-exc-c14n#"
        DIGEST_MD2 = "http://www.w3.org/2000/09/xmldsig#md2"
        DIGEST_MD5 = "http://www.w3.org/2000/09/xmldsig#md5"
        DIGEST_SHA1 = "http://www.w3.org/2000/09/xmldsig#sha1"
        ENC_BASE64 = "http://www.w3.org/2000/09/xmldsig#base64"
        ENVELOPED = "http://www.w3.org/2000/09/xmldsig#enveloped-signature"
        HMAC_SHA1 = "http://www.w3.org/2000/09/xmldsig#hmac-sha1"
        SIG_DSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#dsa-sha1"
        SIG_RSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#rsa-sha1"
        XPATH = "http://www.w3.org/TR/1999/REC-xpath-19991116"
        XSLT = "http://www.w3.org/TR/1999/REC-xslt-19991116"
    class ENCRYPTION:
        # XML Encryption algorithm identifiers.
        BASE = "http://www.w3.org/2001/04/xmlenc#"
        BLOCK_3DES = "http://www.w3.org/2001/04/xmlenc#des-cbc"
        BLOCK_AES128 = "http://www.w3.org/2001/04/xmlenc#aes128-cbc"
        BLOCK_AES192 = "http://www.w3.org/2001/04/xmlenc#aes192-cbc"
        BLOCK_AES256 = "http://www.w3.org/2001/04/xmlenc#aes256-cbc"
        DIGEST_RIPEMD160 = "http://www.w3.org/2001/04/xmlenc#ripemd160"
        DIGEST_SHA256 = "http://www.w3.org/2001/04/xmlenc#sha256"
        DIGEST_SHA512 = "http://www.w3.org/2001/04/xmlenc#sha512"
        KA_DH = "http://www.w3.org/2001/04/xmlenc#dh"
        KT_RSA_1_5 = "http://www.w3.org/2001/04/xmlenc#rsa-1_5"
        KT_RSA_OAEP = "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"
        STREAM_ARCFOUR = "http://www.w3.org/2001/04/xmlenc#arcfour"
        WRAP_3DES = "http://www.w3.org/2001/04/xmlenc#kw-3des"
        WRAP_AES128 = "http://www.w3.org/2001/04/xmlenc#kw-aes128"
        WRAP_AES192 = "http://www.w3.org/2001/04/xmlenc#kw-aes192"
        WRAP_AES256 = "http://www.w3.org/2001/04/xmlenc#kw-aes256"
class WSRF_V1_2:
    '''OASIS WSRF Specifications Version 1.2
    '''
    class LIFETIME:
        # WS-ResourceLifetime 1.2 draft schema (.xsd) and WSDL URIs.
        XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceLifetime-1.2-draft-01.xsd"
        XSD_DRAFT4 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceLifetime-1.2-draft-04.xsd"

        WSDL_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceLifetime-1.2-draft-01.wsdl"
        WSDL_DRAFT4 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceLifetime-1.2-draft-04.wsdl"
        LATEST = WSDL_DRAFT4
        WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT4)
        XSD_LIST = (XSD_DRAFT1, XSD_DRAFT4)

    class PROPERTIES:
        # WS-ResourceProperties 1.2 draft schema and WSDL URIs.
        XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceProperties-1.2-draft-01.xsd"
        XSD_DRAFT5 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceProperties-1.2-draft-05.xsd"

        WSDL_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceProperties-1.2-draft-01.wsdl"
        WSDL_DRAFT5 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceProperties-1.2-draft-05.wsdl"
        LATEST = WSDL_DRAFT5
        WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT5)
        XSD_LIST = (XSD_DRAFT1, XSD_DRAFT5)

    class BASENOTIFICATION:
        # WS-BaseNotification 1.2 draft schema and WSDL URIs.
        XSD_DRAFT1 = "http://docs.oasis-open.org/wsn/2004/06/wsn-WS-BaseNotification-1.2-draft-01.xsd"

        WSDL_DRAFT1 = "http://docs.oasis-open.org/wsn/2004/06/wsn-WS-BaseNotification-1.2-draft-01.wsdl"
        LATEST = WSDL_DRAFT1
        WSDL_LIST = (WSDL_DRAFT1,)
        XSD_LIST = (XSD_DRAFT1,)

    class BASEFAULTS:
        # WS-BaseFaults 1.2 draft schema URIs (schema only; no WSDL drafts).
        XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-BaseFaults-1.2-draft-01.xsd"
        XSD_DRAFT3 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-BaseFaults-1.2-draft-03.xsd"
        #LATEST = DRAFT3
        #WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT3)
        XSD_LIST = (XSD_DRAFT1, XSD_DRAFT3)

# Default WSRF version, and the tuple of all supported versions.
WSRF = WSRF_V1_2
WSRFLIST = (WSRF_V1_2,)
class OASIS:
    '''URLs for Oasis specifications
    '''
    WSSE = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
    UTILITY = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"

    class X509TOKEN:
        # WSS X.509 certificate token profile identifiers.
        Base64Binary = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary"
        STRTransform = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0"
        PKCS7 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#PKCS7"
        X509 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509"
        X509PKIPathv1 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509PKIPathv1"
        X509v3SubjectKeyIdentifier = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3SubjectKeyIdentifier"

    # Convenience aliases to the first-draft WSRF schemas above.
    LIFETIME = WSRF_V1_2.LIFETIME.XSD_DRAFT1
    PROPERTIES = WSRF_V1_2.PROPERTIES.XSD_DRAFT1
    BASENOTIFICATION = WSRF_V1_2.BASENOTIFICATION.XSD_DRAFT1
    BASEFAULTS = WSRF_V1_2.BASEFAULTS.XSD_DRAFT1
class APACHE:
    '''This name space is defined by AXIS and it is used for the TC in TCapache.py,
    Map and file attachment (DataHandler)
    '''
    AXIS_NS = "http://xml.apache.org/xml-soap"
class WSTRUST:
    """WS-Trust (2004/04) namespace and action URIs."""
    BASE = "http://schemas.xmlsoap.org/ws/2004/04/trust"
    ISSUE = "http://schemas.xmlsoap.org/ws/2004/04/trust/Issue"

class WSSE:
    """WS-Security extension (2002/04) namespace URIs."""
    BASE = "http://schemas.xmlsoap.org/ws/2002/04/secext"
    TRUST = WSTRUST.BASE

class WSU:
    """WS-Utility namespace URIs."""
    BASE = "http://schemas.xmlsoap.org/ws/2002/04/utility"
    UTILITY = "http://schemas.xmlsoap.org/ws/2002/07/utility"

class WSR:
    """IBM WS-Resource draft namespace URIs."""
    PROPERTIES = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceProperties"
    LIFETIME = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceLifetime"
class WSA200508:
    """WS-Addressing, W3C 2005/08 namespace and well-known URIs."""
    ADDRESS = "http://www.w3.org/2005/08/addressing"
    ANONYMOUS = "%s/anonymous" %ADDRESS
    FAULT = "%s/fault" %ADDRESS

class WSA200408:
    """WS-Addressing, 2004/08 submission namespace."""
    ADDRESS = "http://schemas.xmlsoap.org/ws/2004/08/addressing"
    ANONYMOUS = "%s/role/anonymous" %ADDRESS
    FAULT = "%s/fault" %ADDRESS

class WSA200403:
    """WS-Addressing, 2004/03 submission namespace."""
    ADDRESS = "http://schemas.xmlsoap.org/ws/2004/03/addressing"
    ANONYMOUS = "%s/role/anonymous" %ADDRESS
    FAULT = "%s/fault" %ADDRESS

class WSA200303:
    """WS-Addressing, 2003/03 submission namespace (no fault URI defined)."""
    ADDRESS = "http://schemas.xmlsoap.org/ws/2003/03/addressing"
    ANONYMOUS = "%s/role/anonymous" %ADDRESS
    FAULT = None

# Default WS-Addressing version, plus all supported versions (newest first).
WSA = WSA200408
WSA_LIST = (WSA200508, WSA200408, WSA200403, WSA200303)
class _WSAW(str):
""" Define ADDRESS attribute to be compatible with WSA* layout """
ADDRESS = property(lambda s: s)
WSAW200605 = _WSAW("http://www.w3.org/2006/05/addressing/wsdl")
WSAW_LIST = (WSAW200605,)
class WSP:
    """WS-Policy namespace URI."""
    POLICY = "http://schemas.xmlsoap.org/ws/2002/12/policy"

class BEA:
    """BEA secure-conversation namespace URIs."""
    SECCONV = "http://schemas.xmlsoap.org/ws/2004/04/sc"
    SCTOKEN = "http://schemas.xmlsoap.org/ws/2004/04/security/sc/sct"

class GLOBUS:
    """Globus Toolkit security namespace URIs."""
    SECCONV = "http://wsrf.globus.org/core/2004/07/security/secconv"
    CORE = "http://www.globus.org/namespaces/2004/06/core"
    SIG = "http://www.globus.org/2002/04/xmlenc#gssapi-sign"
    TOKEN = "http://www.globus.org/ws/2004/09/security/sc#GSSAPI_GSI_TOKEN"

# Base URI of ZSI's own helper schema.
ZSI_SCHEMA_URI = 'http://www.zolera.com/schemas/ZSI/'
| gpl-3.0 |
imovedev/cf-php-build-pack-def | lib/build_pack_utils/hashes.py | 22 | 1511 | import hashlib
import logging
from functools import partial
from subprocess import Popen
from subprocess import PIPE
class HashUtil(object):
    """Computes and compares file digests with hashlib.

    The digest algorithm name is read from the ``CACHE_HASH_ALGORITHM``
    key of the configuration dict (e.g. ``'sha1'``, ``'sha256'``).
    """

    def __init__(self, config):
        # config: build-pack context dict; only CACHE_HASH_ALGORITHM is read.
        self._ctx = config
        self._log = logging.getLogger('hashes')

    def calculate_hash(self, checkFile):
        """Return the hex digest of ``checkFile``, or '' when no path given."""
        if checkFile is None or checkFile == '':
            return ''
        hsh = hashlib.new(self._ctx['CACHE_HASH_ALGORITHM'])
        with open(checkFile, 'rb') as fileIn:
            # Read in chunks so large files need not fit in memory.
            # BUGFIX: the sentinel must be b'' -- the file is opened in
            # binary mode, so read() yields bytes; the previous '' (str)
            # sentinel never matched and looped forever on Python 3.
            for buf in iter(partial(fileIn.read, 8192), b''):
                hsh.update(buf)
        digest = hsh.hexdigest()
        self._log.debug("Hash of [%s] is [%s]", checkFile, digest)
        return digest

    def does_hash_match(self, digest, toFile):
        """True when the first token of ``digest`` equals the file's digest."""
        return (digest.split()[0] == self.calculate_hash(toFile))
class ShaHashUtil(HashUtil):
    """HashUtil variant that shells out to the external ``shasum`` tool.

    NOTE(review): written for Python 2 -- under Python 3 ``communicate()``
    returns bytes, so ``output.strip().split(' ')`` and
    ``err.split('\\n')`` would need decoding first; confirm before use.
    Requires ``shasum`` on the PATH.
    """

    def __init__(self, config):
        HashUtil.__init__(self, config)

    def calculate_hash(self, checkFile):
        """Return the hex digest of ``checkFile`` as reported by shasum.

        Returns '' for a missing path, raises ValueError when shasum
        exits with status 1, and (implicitly) returns None for any
        other non-zero exit status.
        """
        if checkFile is None or checkFile == '':
            return ''
        # CACHE_HASH_ALGORITHM is e.g. 'sha256'; shasum's -a wants '256'.
        # NOTE(review): lstrip('sha') strips the characters s/h/a, not the
        # prefix; fine for 'shaN' names but fragile for anything else.
        proc = Popen(["shasum", "-b",
                      "-a", self._ctx['CACHE_HASH_ALGORITHM'].lstrip('sha'),
                      checkFile], stdout=PIPE, stderr=PIPE)
        output, err = proc.communicate()
        retcode = proc.poll()
        if retcode == 0:
            # shasum prints "<digest> *<file>"; keep only the digest token.
            digest = output.strip().split(' ')[0]
            self._log.debug("Hash of [%s] is [%s]", checkFile, digest)
            return digest
        elif retcode == 1:
            raise ValueError(err.split('\n')[0])
40223226/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/urllib/parse.py | 735 | 35170 | """Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
"""
import re
import sys
import collections
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
           "urlsplit", "urlunsplit", "urlencode", "parse_qs",
           "parse_qsl", "quote", "quote_plus", "quote_from_bytes",
           "unquote", "unquote_plus", "unquote_to_bytes"]

# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp',
                 'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp', 'tel']

# These are not actually used anymore, but should stay for backwards
# compatibility.  (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero', '']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
# Bounded cache of urlsplit() results, keyed on (url, scheme, flags, types).
_parse_cache = {}
def clear_cache():
    """Clear the parse cache and the quoters cache."""
    for cache in (_parse_cache, _safe_quoters):
        cache.clear()
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
def _noop(obj):
return obj
def _encode_result(obj, encoding=_implicit_encoding,
errors=_implicit_errors):
return obj.encode(encoding, errors)
def _decode_args(args, encoding=_implicit_encoding,
errors=_implicit_errors):
return tuple(x.decode(encoding, errors) if x else '' for x in args)
def _coerce_args(*args):
# Invokes decode if necessary to create str args
# and returns the coerced inputs along with
# an appropriate result coercion function
# - noop for str inputs
# - encoding function otherwise
str_input = isinstance(args[0], str)
for arg in args[1:]:
# We special-case the empty string to support the
# "scheme=''" default argument to some functions
if arg and isinstance(arg, str) != str_input:
raise TypeError("Cannot mix str and non-str arguments")
if str_input:
return args + (_noop,)
return _decode_args(args) + (_encode_result,)
# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
"""Standard approach to encoding parsed results from str to bytes"""
__slots__ = ()
def encode(self, encoding='ascii', errors='strict'):
return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
class _ResultMixinBytes(object):
"""Standard approach to decoding parsed results from bytes to str"""
__slots__ = ()
def decode(self, encoding='ascii', errors='strict'):
return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
class _NetlocResultMixinBase(object):
"""Shared methods for the parsed result objects containing a netloc element"""
__slots__ = ()
@property
def username(self):
return self._userinfo[0]
@property
def password(self):
return self._userinfo[1]
@property
def hostname(self):
hostname = self._hostinfo[0]
if not hostname:
hostname = None
elif hostname is not None:
hostname = hostname.lower()
return hostname
@property
def port(self):
port = self._hostinfo[1]
if port is not None:
port = int(port, 10)
# Return None on an illegal port
if not ( 0 <= port <= 65535):
return None
return port
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition('@')
if have_info:
username, have_password, password = userinfo.partition(':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition('@')
_, have_open_br, bracketed = hostinfo.partition('[')
if have_open_br:
hostname, _, port = bracketed.partition(']')
_, have_port, port = port.partition(':')
else:
hostname, have_port, port = hostinfo.partition(':')
if not have_port:
port = None
return hostname, port
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition(b'@')
if have_info:
username, have_password, password = userinfo.partition(b':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition(b'@')
_, have_open_br, bracketed = hostinfo.partition(b'[')
if have_open_br:
hostname, _, port = bracketed.partition(b']')
_, have_port, port = port.partition(b':')
else:
hostname, have_port, port = hostinfo.partition(b':')
if not have_port:
port = None
return hostname, port
from collections import namedtuple

# Bare field containers; the result classes below add the str/bytes
# conversion behaviour and the netloc-derived properties.
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')

# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
    """str result type of urldefrag(); fields are (url, fragment)."""
    __slots__ = ()

    def geturl(self):
        """Reassemble the URL, re-attaching the fragment when present."""
        return self.url + '#' + self.fragment if self.fragment else self.url
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
    # str result type of urlsplit().
    __slots__ = ()

    def geturl(self):
        """Return the re-combined URL for this split result."""
        return urlunsplit(self)


class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
    # str result type of urlparse().
    __slots__ = ()

    def geturl(self):
        """Return the re-combined URL for this parse result."""
        return urlunparse(self)
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    """bytes result type of urldefrag(); fields are (url, fragment)."""
    __slots__ = ()

    def geturl(self):
        """Reassemble the URL, re-attaching the fragment when present."""
        return self.url + b'#' + self.fragment if self.fragment else self.url
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
    # bytes result type of urlsplit().
    __slots__ = ()

    def geturl(self):
        """Return the re-combined URL for this split result."""
        return urlunsplit(self)


class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
    # bytes result type of urlparse().
    __slots__ = ()

    def geturl(self):
        """Return the re-combined URL for this parse result."""
        return urlunparse(self)
# Set up the encode/decode result pairs
def _fix_result_transcoding():
    """Link each str result class with its bytes counterpart (and back)."""
    pairs = (
        (DefragResult, DefragResultBytes),
        (SplitResult, SplitResultBytes),
        (ParseResult, ParseResultBytes),
    )
    for decoded, encoded in pairs:
        decoded._encoded_counterpart = encoded
        encoded._decoded_counterpart = decoded

_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    scheme, netloc, url, query, fragment = urlsplit(url, scheme, allow_fragments)
    # Only split out ';params' for schemes known to carry them.
    params = ''
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    return _coerce_result(
        ParseResult(scheme, netloc, url, params, query, fragment))
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    allow_fragments = bool(allow_fragments)
    # Cache on the types as well as the values: str and bytes inputs
    # yield differently-typed results.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return _coerce_result(cached)
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        # Fast path for 'http', duplicating the general logic below.
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return _coerce_result(v)
        # General case: only treat the prefix as a scheme when every
        # character before the ':' is valid in a scheme name.
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return _coerce_result(v)
def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
        _coerce_args(*components))
    # Fold params back into the path before delegating to urlunsplit().
    if params:
        url = "%s;%s" % (url, params)
    recombined = urlunsplit((scheme, netloc, url, query, fragment))
    return _coerce_result(recombined)
def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
        _coerce_args(*components))
    # A netloc (or a scheme that normally carries one) forces the '//'
    # prefix; the path must then begin with '/' so it cannot be mistaken
    # for part of the netloc.
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return _coerce_result(url)
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
        urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
        urlparse(url, bscheme, allow_fragments)
    # A different (or non-relative) scheme means the reference is absolute.
    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc
    # Absolute path in the reference: keep it as-is.
    if path[:1] == '/':
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    if not path and not params:
        # Empty reference path: inherit the base path/params, and the
        # base query too unless the reference supplies its own.
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    # (Pre-RFC 3986 dot-segment removal; known not to match the RFC's
    # remove_dot_segments algorithm in every corner case.)
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
                                      params, query, fragment)))
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' not in url:
        return _coerce_result(DefragResult(url, ''))
    s, n, p, a, q, frag = urlparse(url)
    defrag = urlunparse((s, n, p, a, q, ''))
    return _coerce_result(DefragResult(defrag, frag))
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit bytes pair (in any case mix) to its byte value.
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
              for a in _hexdig for b in _hexdig}


def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        string.split      # raise AttributeError for non-string-like objects
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    chunks = string.split(b'%')
    if len(chunks) == 1:
        return string     # no escapes at all
    decoded = [chunks[0]]
    for chunk in chunks[1:]:
        byte = _hextobyte.get(chunk[:2])
        if byte is None:
            # Invalid or truncated escape: keep the '%' and the raw text.
            decoded.append(b'%')
            decoded.append(chunk)
        else:
            decoded.append(byte)
            decoded.append(chunk[2:])
    return b''.join(decoded)

_asciire = re.compile('([\x00-\x7f]+)')

def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.
    By default, percent-encoded sequences are decoded with UTF-8, and invalid
    sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        string.split      # raise AttributeError for non-strings
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    # The capturing split alternates non-ASCII pieces (kept verbatim)
    # with ASCII pieces, which are the only ones percent-decoded.
    pieces = _asciire.split(string)
    result = [pieces[0]]
    for idx in range(1, len(pieces), 2):
        result.append(unquote_to_bytes(pieces[idx]).decode(encoding, errors))
        result.append(pieces[idx + 1])
    return ''.join(result)
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace'):
    """Parse a query string into a dict mapping each name to a value list.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: if true, blank values are retained as empty
        strings; if false (the default) they are ignored.

    strict_parsing: if true, parsing errors raise ValueError instead of
        being silently ignored.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.
    """
    parsed_result = {}
    pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
                      encoding=encoding, errors=errors)
    for name, value in pairs:
        parsed_result.setdefault(name, []).append(value)
    return parsed_result
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    Returns a list of (name, value) pairs.
    """
    qs, _coerce_result = _coerce_args(qs)
    # Both '&' and ';' are accepted as pair separators.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            # '+' means space in form encoding; translate before unquoting.
            name = nv[0].replace('+', ' ')
            name = unquote(name, encoding=encoding, errors=errors)
            name = _coerce_result(name)
            value = nv[1].replace('+', ' ')
            value = unquote(value, encoding=encoding, errors=errors)
            value = _coerce_result(value)
            r.append((name, value))
    return r
def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    return unquote(string.replace('+', ' '), encoding, errors)
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'abcdefghijklmnopqrstuvwxyz'
b'0123456789'
b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
_safe_quoters = {}
class Quoter(collections.defaultdict):
"""A mapping from bytes (in range(0,256)) to strings.
String values are percent-encoded byte values, unless the key < 128, and
in the "safe" set (either the specified safe set, or default set).
"""
# Keeps a cache internally, using defaultdict, for efficiency (lookups
# of cached keys don't call Python code at all).
def __init__(self, safe):
"""safe: bytes object."""
self.safe = _ALWAYS_SAFE.union(safe)
def __repr__(self):
# Without this, will just display as a defaultdict
return "<Quoter %r>" % dict(self)
def __missing__(self, b):
# Handle a cache miss. Store quoted string in cache and return.
res = chr(b) if b in self.safe else '%{:02X}'.format(b)
self[b] = res
return res
def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Percent-encode the characters of *string* that are neither in the
    RFC 2396 unreserved set nor in *safe*.  The default safe set is '/',
    because quote() is normally applied to the path section of a URL,
    where existing slashes are being used as reserved delimiters.

    string and safe may be either str or bytes objects.  For str input,
    the optional encoding (default 'utf-8') and errors (default
    'strict') parameters describe how to encode it to bytes first, as
    accepted by str.encode(); they must not be supplied for bytes input.
    """
    if isinstance(string, str):
        if not string:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    elif encoding is not None:
        raise TypeError("quote() doesn't support 'encoding' for bytes")
    elif errors is not None:
        raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)
def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values.  Plus signs in the original string are escaped unless
    they are included in safe.  Unlike quote(), safe does not default to '/'.
    """
    # With no space present, plain quote() already gives the right answer.
    if ((isinstance(string, str) and ' ' not in string) or
            (isinstance(string, bytes) and b' ' not in string)):
        return quote(string, safe, encoding, errors)
    # Make the space temporarily "safe" so it survives quoting, then
    # translate it to '+'.
    space = ' ' if isinstance(safe, str) else b' '
    quoted = quote(string, safe + space, encoding, errors)
    return quoted.replace(' ', '+')
def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = safe.encode('ascii', 'ignore')
    else:
        safe = bytes([c for c in safe if c < 128])
    # If every byte is already safe, nothing needs quoting.
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        return bs.decode()
    try:
        quoter = _safe_quoters[safe]
    except KeyError:
        # One cached Quoter per distinct 'safe' byte set.
        _safe_quoters[safe] = quoter = Quoter(safe).__getitem__
    return ''.join([quoter(char) for char in bs])
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The query arg may be either a string or a bytes type. When query arg is a
    string, the safe, encoding and error parameters are sent the quote_plus for
    encoding.
    """

    if hasattr(query, "items"):
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty, va, tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence "
                            "or mapping object").with_traceback(tb)

    l = []
    if not doseq:
        # Simple case: every value is quoted as a single scalar.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_plus(v, safe)
            else:
                v = quote_plus(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        # doseq: bytes/str values stay scalar; other sequences contribute
        # one 'key=element' pair per element.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_plus(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_plus(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_plus(elt, safe)
                        else:
                            elt = quote_plus(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'.

    Verify that *url* contains only ASCII characters and return it
    unchanged; raise UnicodeError otherwise.  Non-str inputs are passed
    through untouched.
    """
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    # XXX get rid of to_bytes()
    if not isinstance(url, str):
        return url
    try:
        # Encoding succeeds iff the string is pure ASCII; the decoded
        # result is value-equal to the input, so returning url is the same.
        url.encode("ASCII")
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
    return url
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'.

    Strip surrounding whitespace, one pair of angle brackets, and an
    optional leading 'URL:' marker.
    """
    text = str(url).strip()
    if text.startswith('<') and text.endswith('>'):
        text = text[1:-1].strip()
    if text.startswith('URL:'):
        text = text[4:].strip()
    return text
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    # Compile lazily and cache at module level so repeated calls reuse
    # the pattern object.
    if _typeprog is None:
        import re
        _typeprog = re.compile('^([^/:]+):')

    match = _typeprog.match(url)
    if match is None:
        return None, url
    scheme = match.group(1)
    # Scheme is case-insensitive; the remainder is returned verbatim.
    return scheme.lower(), url[len(scheme) + 1:]
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')

    match = _hostprog.match(url)
    if match is None:
        return None, url
    host_port, path = match.groups()
    # Normalize: a non-empty path (e.g. a bare query string) always
    # starts with '/'.
    if path and not path.startswith('/'):
        path = '/' + path
    return host_port, path
_userprog = None
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        import re
        # Greedy '.*' means the LAST '@' separates user info from host.
        _userprog = re.compile('^(.*)@(.*)$')

    match = _userprog.match(host)
    if match is None:
        return None, host
    return match.group(1, 2)
_passwdprog = None
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    global _passwdprog
    if _passwdprog is None:
        import re
        # re.S lets the password part contain newlines.
        _passwdprog = re.compile('^([^:]*):(.*)$', re.S)

    match = _passwdprog.match(user)
    if match is None:
        return user, None
    return match.group(1, 2)
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        import re
        # Only splits when the suffix after ':' is all digits.
        _portprog = re.compile('^(.*):([0-9]+)$')

    match = _portprog.match(host)
    if match is None:
        return host, None
    return match.group(1, 2)
_nportprog = None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.

    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number."""
    global _nportprog
    if _nportprog is None:
        import re
        _nportprog = re.compile('^(.*):(.*)$')

    match = _nportprog.match(host)
    if match is None:
        # No colon at all: keep the host and report the default port.
        return host, defport
    name, port = match.group(1, 2)
    if port:
        try:
            nport = int(port)
        except ValueError:
            # ':' present but not numeric.
            nport = None
    else:
        # Trailing ':' with nothing after it.
        nport = None
    return name, nport
_queryprog = None
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'.

    The query is everything after the LAST '?'; if the url contains no
    '?', return (url, None).
    """
    global _queryprog
    if _queryprog is None:
        import re
        # Fix: use a raw string.  In a plain literal '\?' is an invalid
        # escape sequence, which raises DeprecationWarning (and will
        # eventually be a SyntaxError) on modern CPython.
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')

    match = _queryprog.match(url)
    if match: return match.group(1, 2)
    return url, None
_tagprog = None
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    global _tagprog
    if _tagprog is None:
        import re
        # Greedy '.*' means the tag starts at the LAST '#'.
        _tagprog = re.compile('^(.*)#([^#]*)$')

    match = _tagprog.match(url)
    if match is None:
        return url, None
    return match.group(1, 2)
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    # First ';'-separated field is the path; the rest are attributes.
    path, *attrs = url.split(';')
    return path, attrs
_valueprog = None
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    global _valueprog
    if _valueprog is None:
        import re
        # Split at the FIRST '=' ('[^=]*' cannot cross one).
        _valueprog = re.compile('^([^=]*)=(.*)$')

    match = _valueprog.match(attr)
    if match is None:
        return attr, None
    return match.group(1, 2)
| gpl-3.0 |
beobal/python-driver | tests/integration/cqlengine/model/test_model_io.py | 7 | 16502 | # Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from uuid import uuid4, UUID
import random
from datetime import datetime, date, time
from decimal import Decimal
from operator import itemgetter
from cassandra.cqlengine import columns
from cassandra.cqlengine import CQLEngineException
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import drop_table
from cassandra.cqlengine.models import Model
from cassandra.util import Date, Time
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class TestModel(Model):
    # Minimal model exercised by the IO tests below: a UUID primary key
    # plus one column of each of a few basic types.
    id = columns.UUID(primary_key=True, default=lambda: uuid4())
    count = columns.Integer()
    text = columns.Text(required=False)
    a_bool = columns.Boolean(default=False)
class TestModelIO(BaseCassEngTestCase):
    """Round-trip create/save/load/delete tests for TestModel, plus
    insert/read verification for every supported column type."""

    @classmethod
    def setUpClass(cls):
        super(TestModelIO, cls).setUpClass()
        sync_table(TestModel)

    @classmethod
    def tearDownClass(cls):
        super(TestModelIO, cls).tearDownClass()
        drop_table(TestModel)

    def test_model_save_and_load(self):
        """
        Tests that models can be saved and retrieved
        """
        tm = TestModel.create(count=8, text='123456789')
        self.assertIsInstance(tm, TestModel)

        tm2 = TestModel.objects(id=tm.pk).first()
        self.assertIsInstance(tm2, TestModel)

        # Every column must round-trip unchanged.
        for cname in tm._columns.keys():
            self.assertEqual(getattr(tm, cname), getattr(tm2, cname))

    def test_model_read_as_dict(self):
        """
        Tests that columns of an instance can be read as a dict.
        """
        tm = TestModel.create(count=8, text='123456789', a_bool=True)
        column_dict = {
            'id': tm.id,
            'count': tm.count,
            'text': tm.text,
            'a_bool': tm.a_bool,
        }
        # The instance should expose the full mapping protocol over its
        # columns: keys/values/items/len/getitem/setitem.
        self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys()))
        self.assertSetEqual(set(tm.values()), set(column_dict.values()))
        self.assertEqual(
            sorted(tm.items(), key=itemgetter(0)),
            sorted(column_dict.items(), key=itemgetter(0)))
        self.assertEqual(len(tm), len(column_dict))
        for column_id in column_dict.keys():
            self.assertEqual(tm[column_id], column_dict[column_id])

        tm['count'] = 6
        self.assertEqual(tm.count, 6)

    def test_model_updating_works_properly(self):
        """
        Tests that subsequent saves after initial model creation work
        """
        tm = TestModel.objects.create(count=8, text='123456789')

        tm.count = 100
        tm.a_bool = True
        tm.save()

        tm2 = TestModel.objects(id=tm.pk).first()
        self.assertEqual(tm.count, tm2.count)
        self.assertEqual(tm.a_bool, tm2.a_bool)

    def test_model_deleting_works_properly(self):
        """
        Tests that an instance's delete method deletes the instance
        """
        tm = TestModel.create(count=8, text='123456789')
        tm.delete()
        tm2 = TestModel.objects(id=tm.pk).first()
        self.assertIsNone(tm2)

    def test_column_deleting_works_properly(self):
        """
        Tests that setting a column to None and saving nulls it out.
        """
        tm = TestModel.create(count=8, text='123456789')
        tm.text = None
        tm.save()

        tm2 = TestModel.objects(id=tm.pk).first()
        self.assertIsInstance(tm2, TestModel)

        assert tm2.text is None
        assert tm2._values['text'].previous_value is None

    def test_a_sensical_error_is_raised_if_you_try_to_create_a_table_twice(self):
        """
        Syncing an already-synced table must be a no-op, not an error.
        """
        sync_table(TestModel)
        sync_table(TestModel)

    def test_can_insert_model_with_all_column_types(self):
        """
        Test for inserting all column types into a Model

        test_can_insert_model_with_all_column_types tests that each cqlengine column type can be inserted into a Model.
        It first creates a Model that has each cqlengine column type. It then creates a Model instance where all the fields
        have corresponding data, which performs the insert into the Cassandra table.
        Finally, it verifies that each column read from the Model from Cassandra is the same as the input parameters.

        @since 2.6.0
        @jira_ticket PYTHON-246
        @expected_result The Model is inserted with each column type, and the resulting read yields proper data for each column.

        @test_category data_types:primitive
        """

        class AllDatatypesModel(Model):
            id = columns.Integer(primary_key=True)
            a = columns.Ascii()
            b = columns.BigInt()
            c = columns.Blob()
            d = columns.Boolean()
            e = columns.DateTime()
            f = columns.Decimal()
            g = columns.Double()
            h = columns.Float(double_precision=False)
            i = columns.Inet()
            j = columns.Integer()
            k = columns.Text()
            l = columns.TimeUUID()
            m = columns.UUID()
            n = columns.VarInt()

        sync_table(AllDatatypesModel)

        # Expected values, in column order a..n (14 columns).
        input = ['ascii', 2 ** 63 - 1, bytearray(b'hello world'), True, datetime.utcfromtimestamp(872835240),
                 Decimal('12.3E+7'), 2.39, 3.4028234663852886e+38, '123.123.123.123', 2147483647, 'text',
                 UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'), UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'),
                 int(str(2147483647) + '000')]

        AllDatatypesModel.create(id=0, a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True,
                                 e=datetime.utcfromtimestamp(872835240), f=Decimal('12.3E+7'), g=2.39,
                                 h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647, k='text',
                                 l=UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'),
                                 m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000'))

        self.assertEqual(1, AllDatatypesModel.objects.count())
        output = AllDatatypesModel.objects().first()

        # Verify columns 'a' through 'n' against the expected inputs.
        for i, i_char in enumerate(range(ord('a'), ord('a') + 14)):
            self.assertEqual(input[i], output[chr(i_char)])

    def test_can_insert_model_with_all_protocol_v4_column_types(self):
        """
        Test for inserting all protocol v4 column types into a Model

        test_can_insert_model_with_all_protocol_v4_column_types tests that each cqlengine protocol v4 column type can be
        inserted into a Model. It first creates a Model that has each cqlengine protocol v4 column type. It then creates
        a Model instance where all the fields have corresponding data, which performs the insert into the Cassandra table.
        Finally, it verifies that each column read from the Model from Cassandra is the same as the input parameters.

        @since 2.6.0
        @jira_ticket PYTHON-245
        @expected_result The Model is inserted with each protocol v4 column type, and the resulting read yields proper data for each column.

        @test_category data_types:primitive
        """

        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))

        class v4DatatypesModel(Model):
            id = columns.Integer(primary_key=True)
            a = columns.Date()
            b = columns.SmallInt()
            c = columns.Time()
            d = columns.TinyInt()

        sync_table(v4DatatypesModel)

        # Expected values, in column order a..d (4 columns).
        input = [Date(date(1970, 1, 1)), 32523, Time(time(16, 47, 25, 7)), 123]

        v4DatatypesModel.create(id=0, a=date(1970, 1, 1), b=32523, c=time(16, 47, 25, 7), d=123)

        self.assertEqual(1, v4DatatypesModel.objects.count())
        output = v4DatatypesModel.objects().first()

        # Bug fix: the loop previously stopped at ord('a') + 3 and never
        # verified column 'd' (TinyInt); all four inserted columns are
        # now compared against their inputs.
        for i, i_char in enumerate(range(ord('a'), ord('a') + 4)):
            self.assertEqual(input[i], output[chr(i_char)])

    def test_can_insert_double_and_float(self):
        """
        Test for inserting single-precision and double-precision values into a Float and Double columns

        test_can_insert_double_and_float tests that a Float can only hold a single-precision value, unless
        "double_precision" attribute is specified as True or is unspecified. It verifies that Double,
        Float(double_precision=True) and Float() can hold double-precision values by default. It also verifies that
        columns.Float(double_precision=False) can hold a single-precision value, and a Double can hold a single-precision value.

        NOTE(review): the original docstring also claimed an AttributeError
        is raised for a double-precision value in a single-precision Float,
        but no such assertion exists below -- confirm intent.

        @since 2.6.0
        @jira_ticket PYTHON-246
        @expected_result Each floating point column type is able to hold their respective precision values.

        @test_category data_types:primitive
        """

        class FloatingPointModel(Model):
            id = columns.Integer(primary_key=True)
            a = columns.Float(double_precision=False)
            b = columns.Float(double_precision=True)
            c = columns.Float()
            d = columns.Double()

        sync_table(FloatingPointModel)

        # Single-precision column truncates to float32 precision.
        FloatingPointModel.create(id=0, a=2.39)
        output = FloatingPointModel.objects().first()
        self.assertEqual(2.390000104904175, output.a)

        # 3.4028234663852886e+38 (max float32) is exactly representable
        # in single precision; the rest hold full double precision.
        FloatingPointModel.create(id=0, a=3.4028234663852886e+38, b=2.39, c=2.39, d=2.39)
        output = FloatingPointModel.objects().first()
        self.assertEqual(3.4028234663852886e+38, output.a)
        self.assertEqual(2.39, output.b)
        self.assertEqual(2.39, output.c)
        self.assertEqual(2.39, output.d)

        FloatingPointModel.create(id=0, d=3.4028234663852886e+38)
        output = FloatingPointModel.objects().first()
        self.assertEqual(3.4028234663852886e+38, output.d)
class TestMultiKeyModel(Model):
    # Composite primary key: one partition key plus one clustering key,
    # used by the delete/update tests below.
    partition = columns.Integer(primary_key=True)
    cluster = columns.Integer(primary_key=True)
    count = columns.Integer(required=False)
    text = columns.Text(required=False)
class TestDeleting(BaseCassEngTestCase):
    """Instance-level delete must remove exactly one row."""

    @classmethod
    def setUpClass(cls):
        super(TestDeleting, cls).setUpClass()
        drop_table(TestMultiKeyModel)
        sync_table(TestMultiKeyModel)

    @classmethod
    def tearDownClass(cls):
        super(TestDeleting, cls).tearDownClass()
        drop_table(TestMultiKeyModel)

    def test_deleting_only_deletes_one_object(self):
        pkey = random.randint(0, 1000)
        # Populate five rows in one partition.
        for n in range(5):
            TestMultiKeyModel.create(
                partition=pkey, cluster=n, count=n, text=str(n))

        assert TestMultiKeyModel.filter(partition=pkey).count() == 5

        # Deleting a single instance leaves the other four untouched.
        TestMultiKeyModel.get(partition=pkey, cluster=0).delete()
        assert TestMultiKeyModel.filter(partition=pkey).count() == 4

        # Clean up the remaining rows.
        TestMultiKeyModel.filter(partition=pkey).delete()
class TestUpdating(BaseCassEngTestCase):
    """Saves after the initial insert behave as in-place updates."""

    @classmethod
    def setUpClass(cls):
        super(TestUpdating, cls).setUpClass()
        drop_table(TestMultiKeyModel)
        sync_table(TestMultiKeyModel)

    @classmethod
    def tearDownClass(cls):
        super(TestUpdating, cls).tearDownClass()
        drop_table(TestMultiKeyModel)

    def setUp(self):
        super(TestUpdating, self).setUp()
        # One fresh row per test with randomized keys.
        self.instance = TestMultiKeyModel.create(
            partition=random.randint(0, 1000),
            cluster=random.randint(0, 1000),
            count=0,
            text='happy')

    def test_vanilla_update(self):
        self.instance.count = 5
        self.instance.save()

        fetched = TestMultiKeyModel.get(
            partition=self.instance.partition,
            cluster=self.instance.cluster)
        assert fetched.count == 5
        assert fetched.text == 'happy'

    def test_deleting_only(self):
        # Assigning None to columns and saving should null them out.
        self.instance.count = None
        self.instance.text = None
        self.instance.save()

        fetched = TestMultiKeyModel.get(
            partition=self.instance.partition,
            cluster=self.instance.cluster)
        assert fetched.count is None
        assert fetched.text is None

    def test_get_changed_columns(self):
        # Freshly created: nothing is dirty.
        assert self.instance.get_changed_columns() == []

        self.instance.count = 1
        changes = self.instance.get_changed_columns()
        assert len(changes) == 1
        assert changes == ['count']

        # Saving clears the dirty set again.
        self.instance.save()
        assert self.instance.get_changed_columns() == []
class TestCanUpdate(BaseCassEngTestCase):
    """Exercises the _is_persisted / _can_update() state machine."""

    @classmethod
    def setUpClass(cls):
        super(TestCanUpdate, cls).setUpClass()
        drop_table(TestModel)
        sync_table(TestModel)

    @classmethod
    def tearDownClass(cls):
        super(TestCanUpdate, cls).tearDownClass()
        drop_table(TestModel)

    def test_success_case(self):
        tm = TestModel(count=8, text='123456789')

        # Not yet saved: updating in place is impossible.
        assert not tm._is_persisted
        assert not tm._can_update()

        tm.save()

        # Persisted: updates are now allowed.
        assert tm._is_persisted
        assert tm._can_update()

        tm.count = 200
        # Non-key change keeps the row updatable.
        assert tm._can_update()

        tm.save()
        tm.id = uuid4()
        # Changing a primary key invalidates in-place updates.
        assert not tm._can_update()
class IndexDefinitionModel(Model):
    # 'val' carries a secondary index; used below to check that syncing
    # a table with an existing index twice does not raise.
    key = columns.UUID(primary_key=True)
    val = columns.Text(index=True)
class TestIndexedColumnDefinition(BaseCassEngTestCase):
    """Re-syncing a table that declares an index must be a no-op."""

    def test_exception_isnt_raised_if_an_index_is_defined_more_than_once(self):
        # The second call should tolerate the pre-existing index.
        sync_table(IndexDefinitionModel)
        sync_table(IndexDefinitionModel)
class ReservedWordModel(Model):
    # Column names deliberately collide with CQL keywords/builtins
    # ('token', 'insert') to exercise identifier quoting in queries.
    token = columns.Text(primary_key=True)
    insert = columns.Integer(index=True)
class TestQueryQuoting(BaseCassEngTestCase):
    """Identifier quoting for reserved CQL words in generated queries."""

    def test_reserved_cql_words_can_be_used_as_column_names(self):
        """Columns named after reserved words round-trip through a query."""
        sync_table(ReservedWordModel)

        created = ReservedWordModel.create(token='1', insert=5)
        matches = ReservedWordModel.filter(token='1')
        assert len(matches) == 1

        fetched = matches[0]
        assert created.token == fetched.token
        assert created.insert == fetched.insert
class TestQueryModel(Model):
    # Uses a Date clustering key; Date columns need native protocol v4+
    # (see the skip guards in TestQuerying below).
    test_id = columns.UUID(primary_key=True, default=uuid4)
    date = columns.Date(primary_key=True)
    description = columns.Text()
class TestQuerying(BaseCassEngTestCase):
    """Filtering on Date columns (requires native protocol 4+)."""

    @classmethod
    def setUpClass(cls):
        if PROTOCOL_VERSION < 4:
            # setUp() will skip every test; don't touch the schema.
            return
        super(TestQuerying, cls).setUpClass()
        drop_table(TestQueryModel)
        sync_table(TestQueryModel)

    @classmethod
    def tearDownClass(cls):
        if PROTOCOL_VERSION < 4:
            return
        super(TestQuerying, cls).tearDownClass()
        drop_table(TestQueryModel)

    def setUp(self):
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest("Date query tests require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))

    def test_query_with_date(self):
        uid = uuid4()
        day = date(2013, 11, 26)
        created = TestQueryModel.create(test_id=uid, date=day, description=u'foo')
        self.assertEqual(created.description, u'foo')

        row = TestQueryModel.filter(
            TestQueryModel.test_id == uid,
            TestQueryModel.date == day).limit(1).first()
        assert row.test_id == uid
        assert row.date == day
def test_none_filter_fails():
    """Filtering on a None primary key must raise CQLEngineException."""
    class NoneFilterModel(Model):
        pk = columns.Integer(primary_key=True)
        v = columns.Integer()
    sync_table(NoneFilterModel)

    raised = False
    try:
        NoneFilterModel.objects(pk=None)
    except CQLEngineException:
        raised = True
    if not raised:
        raise Exception("fail")
| apache-2.0 |
sassoftware/rmake3 | rmake/multinode/workernode.py | 2 | 17206 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrapper around rmake.worker that receives and sends messages to the dispatcher.
"""
import os
import signal
import socket
import time
import traceback
from conary import errors
from rmake import failure
from rmake.build import subscriber
from rmake.lib import logger
from rmake.lib import osutil
from rmake.lib import procutil
from rmake.lib import server
from rmake.lib.apiutils import api, api_parameters, api_return, freeze, thaw
from rmake.worker import command
from rmake.worker import worker
from rmake.messagebus import busclient
from rmake.multinode import messages
from rmake.multinode import nodetypes
from rmake.multinode import nodeclient
class rMakeWorkerNodeServer(worker.Worker):
    """
    Class that wraps worker functionality from rmake.worker.worker. Actual
    communication w/ messagebus is handled in worker.client

    @param cfg: node cfg
    @type cfg: rmake_node.nodecfg.NodeConfiguration
    @param messageBusInfo: override information for how to get to the
    messagebus
    @type messageBusInfo: (host, port)
    """

    def __init__(self, cfg, messageBusInfo=None):
        serverLogger = logger.Logger('rmake-node',
                                     logPath=cfg.logDir + '/rmake-node.log')
        try:
            serverLogger.info('Starting rMake Node (pid %s)' % os.getpid())
            worker.Worker.__init__(self, cfg, serverLogger,
                                   slots=cfg.slots)
            #calculates current state of the rmake chroot directory.
            chroots = self.listChroots()
            # The client owns the messagebus connection; it calls back
            # into this server via the received*Command methods below.
            self.client = WorkerNodeClient(cfg, self,
                                           procutil.MachineInformation(),
                                           chroots=chroots,
                                           messageBusInfo=messageBusInfo)
            # Throttle periodic status broadcasts (seconds).
            self.lastStatusSent = 0
            self.statusPeriod = 60
        except Exception, err:
            self.error('Error initializing Node Server:\n %s\n%s', err,
                       traceback.format_exc())
            raise

    def busConnected(self, sessionId):
        # Hook invoked once the messagebus handshake completes; no-op.
        pass

    def receivedResolveCommand(self, info):
        # Dependency-resolution request from the dispatcher.
        eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
        self.resolve(info.getResolveJob(), eventHandler, info.getLogData(),
                     commandId=info.getCommandId())

    def receivedActionCommand(self, info):
        # Generic trove action (e.g. commit) request.
        eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
        self.actOnTrove(info.getCommandName(),
                        info.getBuildConfig(),
                        info.getJobId(), info.getTrove(),
                        eventHandler, info.getLogData(),
                        commandId=info.getCommandId())

    def receivedLoadCommand(self, info):
        # Request to load trove definitions from the repository.
        eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
        self.loadTroves(info.getJob(), info.getLoadTroves(), eventHandler,
                        info.getReposName(), commandId=info.getCommandId())

    def receivedBuildCommand(self, info):
        # allow state changes in the trove before/after we actually fork the
        # command
        RmakeBusPublisher(info.getJobId(), self.client).attach(info.getTrove())
        # create an eventHandler which will take events from the command
        # and send them to the messagebus.
        eventHandler = DirectRmakeBusPublisher(info.getJobId(), self.client)
        self.buildTrove(info.getBuildConfig(), info.getJobId(),
                        info.getTrove(), eventHandler,
                        info.getBuildReqs(), info.getCrossReqs(),
                        info.getTargetLabel(), info.getLogInfo(),
                        bootstrapReqs=info.getBootstrapReqs(),
                        builtTroves=info.getBuiltTroves(),
                        commandId=info.getCommandId())

    def receivedStopCommand(self, info):
        # pass command on to worknode underneath.
        self.stopCommand(commandId=info.getCommandId(),
                         targetCommandId=info.getTargetCommandId())

    def _installSignalHandlers(self):
        worker.Worker._installSignalHandlers(self)
        # if you kill the dispatcher w/ SIGUSR1 you'll get a breakpoint.
        # just let signals be handled normally
        def _interrupt(*args, **kw):
            import epdb
            if hasattr(epdb, 'serve'):
                epdb.serve()
            else:
                epdb.st()
        import signal
        signal.signal(signal.SIGUSR1, _interrupt)

    def _signalHandler(self, signal, frame):
        # Run base cleanup, then re-raise the signal at ourselves so the
        # default disposition terminates the process.
        server.Server._signalHandler(self, signal, frame)
        os.kill(os.getpid(), signal)

    def _serveLoopHook(self):
        # Called every .1 seconds or so when polling for
        # new requests.
        # Sends status update about the machine.
        if not self.client.isConnected():
            self.client.connect()
            return
        # Broadcast node status at most once per statusPeriod seconds.
        if (time.time() - self.lastStatusSent) > self.statusPeriod:
            if self.client:
                self.lastStatusSent = time.time()
                info = procutil.MachineInformation()
                # Report both running and still-queued command ids.
                commandIds = [ x.getCommandId() for x in self.commands]
                commandIds += [ x[2][0] for x in self._queuedCommands ]
                self.client.updateStatus(info, commandIds)
        worker.Worker._serveLoopHook(self)

    def handleRequestIfReady(self, sleep):
        # override standard worker's poll mechanism to check the bus
        # instead.
        try:
            self.client.poll(timeout=sleep, maxIterations=1)
        except socket.error, err:
            self.error('Socket connection died: %s' % err.args[1])
            time.sleep(sleep)
        # passing 0 to tell it we've arleady slept if necessary.
        return worker.Worker.handleRequestIfReady(self, 0)

    def commandErrored(self, commandId, msg, tb=''):
        """
        Called by worker after command finishes with error.

        Pass any command errors back to the message bus where they'll
        be dealt with.
        """
        self.client.commandErrored(commandId, msg, tb)

    def commandCompleted(self, commandId):
        """
        Called by worker after a command finishes successfully.

        Forwards the completion notice to the message bus where it will
        be dealt with.
        """
        self.client.commandCompleted(commandId)
class WorkerNodeClient(nodeclient.NodeClient):
    """
    Manages worker node's low-level connection to the messagebus.

    When it receives messages it parses them and passes the information
    up to the WorkerNodeServer. It also accepts commands from
    the node server and passes the information back to the
    message bus.

    Initialization parameters:
    @param cfg: node configuration
    @param server: rMakeServerClass to call when messages received
    @param nodeInfo: procutils.MachineInformation object describing
    the current state of the node.
    """
    sessionClass = 'WORKER'  # type information used by messagebus to classify
                             # connections.
    name = 'rmake-node'      # name used by logging

    def __init__(self, cfg, server, nodeInfo, chroots, messageBusInfo=None):
        # Create a nodeType describing this client that will be passed
        # to the message bus and made available to interested listeners
        # (like the dispatcher)
        node = nodetypes.WorkerNode(name=cfg.name,
                                    host=cfg.hostName,
                                    slots=cfg.slots,
                                    jobTypes=cfg.jobTypes,
                                    buildFlavors=cfg.buildFlavors,
                                    loadThreshold=cfg.loadThreshold,
                                    nodeInfo=nodeInfo, chroots=chroots,
                                    chrootLimit=cfg.chrootLimit)
        # grab the message bus location from the rmake server.
        from rmake_plugins.multinode_client.server import client
        rmakeClient = client.rMakeClient(cfg.rmakeUrl)
        if not messageBusInfo:
            messageBus = None
            # Retry indefinitely until the rmake server answers.
            while not messageBus:
                try:
                    messageBus = rmakeClient.getMessageBusInfo()
                except errors.UncatchableExceptionClasses, e:
                    raise
                except Exception, e:
                    server.error('Could not contact rmake server at %r - waiting 5 seconds and retrying.', cfg.rmakeUrl)
                if not messageBus:
                    time.sleep(5)
            messageBusHost, messageBusPort = messageBus.host, messageBus.port
        else:
            messageBusHost, messageBusPort = messageBusInfo
        nodeclient.NodeClient.__init__(self, messageBusHost,
                                       messageBusPort,
                                       cfg, server, node)
        # Never give up on reconnecting to the messagebus, we want
        # nodes to keep attempting to reconnect forever.
        self.getBusClient().setConnectionTimeout(-1)

    def updateStatus(self, info, commandIds):
        """
        Send current status of node to messagebus to be picked up
        by dispatcher

        @param info: current status of this node
        @type info: procutil.MachineInformation
        @param commandIds: ids of commands currently running or queued
        """
        m = messages.NodeInfo(info, commandIds)
        self.bus.sendMessage('/nodestatus', m)

    def messageReceived(self, m):
        """
        Direct messages accepted by rMake Node.

        Dispatches each known message type to the matching
        server.received*Command handler.

        @param m: messages.Message subclass.
        """
        nodeclient.NodeClient.messageReceived(self, m)
        if isinstance(m, messages.ConnectedResponse):
            # Once connected, listen for commands targeted at this node.
            self.bus.subscribe('/command?targetNode=%s' % m.getSessionId())
            self.server.busConnected(m.getSessionId())
        elif isinstance(m, messages.BuildCommand):
            self.server.info('Received build command')
            self.server.receivedBuildCommand(m)
        elif isinstance(m, messages.ActionCommand):
            self.server.info('Received action command')
            self.server.receivedActionCommand(m)
        elif isinstance(m, messages.StopCommand):
            self.server.info('Received stop command')
            self.server.receivedStopCommand(m)
        elif isinstance(m, messages.ResolveCommand):
            self.server.info('Received resolve command')
            self.server.receivedResolveCommand(m)
        elif isinstance(m, messages.LoadCommand):
            self.server.info('Received load command')
            self.server.receivedLoadCommand(m)
        else:
            self.server.info('Received unknown command')

    def commandErrored(self, commandId, message, traceback=''):
        """
        Send status to messagebus about command commandId

        Wraps a bare message in a CommandFailed failure object before
        publishing the ERROR status.
        """
        m = messages.CommandStatus()
        if not isinstance(message, failure.FailureReason):
            failureReason = failure.CommandFailed(commandId, message, traceback)
        else:
            failureReason = message
        m.set(commandId, m.ERROR, failureReason)
        self.bus.sendMessage('/commandstatus', m)

    def commandCompleted(self, commandId):
        """
        Send status to messagebus about worker command commandId
        """
        m = messages.CommandStatus()
        m.set(commandId, m.COMPLETED)
        self.bus.sendMessage('/commandstatus', m)

    def emitEvents(self, jobId, eventList):
        """
        Send in-progress status updates on events affecting troves
        """
        m = messages.EventList()
        m.set(jobId, eventList)
        # send synchronous message tells the node not to return until
        # the messages are sent. We want events to be high-priority
        # messages that get delivered promptly.
        self.bus.sendSynchronousMessage('/event', m)

    @api(version=1)
    @api_return(1, None)
    def listChroots(self, callData):
        """
        Part of node XMLRPC interface. List all chroot names
        known about for this node.
        """
        return self.server.listChroots()

    @api(version=1)
    @api_return(1, None)
    def listCommands(self, callData):
        """
        Part of node XMLRPC interface. List all commands that are
        Currently queued or active on this node.

        Returns a (queuedCommandIds, [(commandId, pid), ...]) pair.
        """
        return (
            [ x.getCommandId() for x in self.server.listQueuedCommands() ],
            [ (x.getCommandId(), x.pid) for x in self.server.listCommands() ])

    @api(version=1)
    @api_parameters(1, 'str', 'str', 'bool', None)
    @api_return(1, None)
    def startChrootSession(self, callData, chrootPath, command,
                           superUser=False, buildTrove=None):
        """
        Part of rMake node XMLRPC interface. The rMake
        server uses these methods to communicate directly to a
        node without going through the dispatcher.

        Basically a passthrough
        to worker.startSession.

        Returns (True, (hostName, port)) if the connection succeeds.
        Returns (False, FailureReason) if it fails.
        """
        if buildTrove:
            buildTrove = thaw('BuildTrove', buildTrove)
        passed, results = self.server.startSession('_local_', chrootPath,
                                                   command, superUser, buildTrove)
        if not passed:
            # Failure objects must be frozen to cross the XMLRPC boundary.
            results = freeze('FailureReason', results)
        return passed, results

    @api(version=1)
    @api_parameters(1, 'str', 'str')
    @api_return(1, None)
    def archiveChroot(self, callData, chrootPath, newPath):
        """
        Part of rMake node XMLRPC interface. The rMake
        server uses these methods to communicate directly to a
        node without going through the dispatcher.
        """
        return self.server.archiveChroot('_local_', chrootPath, newPath)

    @api(version=1)
    @api_parameters(1, 'str')
    @api_return(1, None)
    def deleteChroot(self, callData, chrootPath):
        """
        Part of rMake node XMLRPC interface. The rMake
        server uses these methods to communicate directly to a
        node without going through the dispatcher.

        Basically a passthrough to deleteChroot.
        """
        return self.server.deleteChroot('_local_', chrootPath)
class WorkerNodeRPCClient(object):
    """
    XMLRPC client for communicating to rMake Node.

    client: connected messagebus session.
    sessionId: sessionId of rMake node to communicate with.
    """
    def __init__(self, client, sessionId):
        self.proxy = busclient.SessionProxy(WorkerNodeClient, client, sessionId)

    def listCommands(self):
        # Returns (queuedCommandIds, [(commandId, pid), ...]).
        return self.proxy.listCommands()

    def listChroots(self):
        # List all chroot names known to the remote node.
        return self.proxy.listChroots()

    def getStatus(self):
        # Intentionally unimplemented.
        # Bug fix: previously spelled "NotImplementError", which raised
        # NameError instead of the intended exception.
        raise NotImplementedError

    def startChrootSession(self, chrootPath, command, superUser=False,
                           buildTrove=None):
        """
        Starts a chroot session on the given node.
        """
        # Freeze the trove for transport; an empty string stands in for
        # "no trove" on the wire.
        if buildTrove is None:
            buildTrove = ''
        else:
            buildTrove = freeze('BuildTrove', buildTrove)
        return self.proxy.startChrootSession(chrootPath, command, superUser,
                                             buildTrove)

    def archiveChroot(self, chrootPath, newPath):
        return self.proxy.archiveChroot(chrootPath, newPath)

    def deleteChroot(self, chrootPath):
        return self.proxy.deleteChroot(chrootPath)
class RmakeBusPublisher(subscriber._RmakePublisherProxy):
    """
    Receives events in unfrozen form, freezes them and puts them
    on the message bus.

    @param jobId: jobId for the events being logged
    @param client: WorkerNodeClient instance
    """

    def __init__(self, jobId, client):
        # Attributes are set before the base __init__ runs; keep this
        # order in case the base initializer relies on them.
        self.jobId = jobId
        self.client = client
        subscriber._RmakePublisherProxy.__init__(self)

    def _emitEvents(self, apiVer, eventList):
        # Forward the (frozen-by-base-class) events for our job to the bus.
        self.client.emitEvents(self.jobId, eventList)
class DirectRmakeBusPublisher(RmakeBusPublisher):
    """
    Receives events already frozen and publishes them directly.

    Overrides _receiveEvents where events are frozen.
    """

    def _freezeEvents(self, apiVer, frozenEventList):
        """
        Events on this bus are already frozen (they come from
        the command), so freezing is a pass-through.
        """
        return self.jobId, frozenEventList
| apache-2.0 |
Therp/odoo | addons/point_of_sale/controllers/main.py | 20 | 1525 | # -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
import time
import random
import werkzeug.utils
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import module_boot, login_redirect
_logger = logging.getLogger(__name__)
class PosController(http.Controller):
    """HTTP entry point that boots the Point of Sale web client."""

    @http.route('/pos/web', type='http', auth='user')
    def a(self, debug=False, **k):
        cr, uid, context, session = request.cr, request.uid, request.context, request.session

        # Unauthenticated visitors are bounced to the login screen.
        if not session.uid:
            return login_redirect()

        # The cashier needs an opened POS session; without one, redirect
        # to the session-opening wizard instead of loading the UI.
        PosSession = request.registry['pos.session']
        open_session_ids = PosSession.search(
            cr, uid,
            [('state', '=', 'opened'), ('user_id', '=', session.uid)],
            context=context)
        if not open_session_ids:
            return werkzeug.utils.redirect(
                '/web#action=point_of_sale.action_pos_session_opening')
        PosSession.login(cr, uid, open_session_ids, context=context)

        # Bootstrap payload for the client: module list plus the JS
        # snippet that starts the POS application.
        modules = simplejson.dumps(module_boot(request.db))
        init = """
                 var wc = new s.web.WebClient();
                 wc.show_application = function(){
                     wc.action_manager.do_action("pos.ui");
                 };
                 wc.setElement($(document.body));
                 wc.start();
                 """
        html = request.registry.get('ir.ui.view').render(
            cr, session.uid, 'point_of_sale.index', {
                'modules': modules,
                'init': init,
            })
        return html
| agpl-3.0 |
windyuuy/opera | chromium/src/chrome/test/pyautolib/history_info.py | 69 | 2330 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""History: python representation for history.
Obtain one of these from PyUITestSuite::GetHistoryInfo() call.
Example:
class MyTest(pyauto.PyUITest):
def testBasic(self):
url = 'http://www.google.com/'
self.NavigateToURL(url)
history = self.GetHistoryInfo()
self.assertEqual(1, len(history))
self.assertEqual(url, history[0]['url'])
See more tests in chrome/test/functional/history.py.
"""
import simplejson as json
from pyauto_errors import JSONInterfaceError
class HistoryInfo(object):
    """Represent info about browsing history.

    The info is represented as a list of history items containing url,
    title, time, etc.
    """

    def __init__(self, history_dict):
        """Initialize a HistoryInfo from a history dictionary.

        Args:
          history_dict: a dictionary as returned by the IPC command
              'GetHistoryInfo'.  (The previous docstring documented a
              nonexistent 'json_string' parameter.)  A typical dict
              representing history info looks like:
              {'history': [
                 {'url': 'http://www.google.com/',
                  'title': 'Google',
                  ...,
                  ...,
                 }, ] }

        Raises:
          pyauto_errors.JSONInterfaceError if the automation call
          returns an error.
        """
        # Dict prepared by GetHistoryInfo() in automation_provider.cc.
        self.historydict = history_dict

    def History(self):
        """Get history list, latest entry first.

        This is the same order in which chrome://history/ would list
        the entries.

        Example:
          [ { u'snippet': u'',
              u'starred': False,
              u'time': 1271781612,
              u'title': u'Google News',
              u'url': u'http://news.google.com/'},
            { u'snippet': u'',
              u'starred': True,
              u'time': 1271781602,
              u'title': u'Google',
              u'url': u'http://www.google.com/'}]

        The snippet attribute will be empty in most cases. If
        GetHistoryInfo() is provided a non-empty search_text arg, the
        snippet attribute will contain the snippet as it would be
        visible when searching for that text in the chrome://history/
        UI.

        Returns:
          [item1, item2, ...]; [] when no history is present.
        """
        return self.historydict.get('history', [])
| bsd-3-clause |
mail-apps/translate | docs/_ext/translate_docs.py | 4 | 1084 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Sphinx extension with custom stuff for Translate Toolkit docs."""
import docutils
def setup(app):
    """Register the Translate Toolkit documentation roles with Sphinx.

    The ``:opt:`` role marks command-line options (``-P``, ``--pot``)
    and option values (``--progress=dots``); it renders as literal
    text via ``docutils.nodes.literal``.
    """
    app.add_generic_role(name="opt", nodeclass=docutils.nodes.literal)
    return {"parallel_read_safe": True}
| gpl-2.0 |
timoschwarzer/blendworks | BlendWorks Server/python/Lib/platform.py | 5 | 49039 | #!/usr/bin/env python3
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another type in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field emtpy)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invokation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import collections
import sys, os, re, subprocess
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos', 'win32', 'win16'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
# Directory to search for configuration information on Unix.
# Constant used by test_platform to test linux_distribution().
_UNIXCONFDIR = '/etc'
### Platform specific APIs
_libc_search = re.compile(b'(__libc_init)'
b'|'
b'(GLIBC_([0-9.]+))'
b'|'
br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)
def libc_ver(executable=sys.executable, lib='', version='',
             chunksize=16384):

    """ Tries to determine the libc version that the file executable
        (which defaults to the Python interpreter) is linked against.

        Returns a tuple of strings (lib, version) which default to the
        given parameters in case the lookup fails.

        Note that the function has intimate knowledge of how different
        libc versions add symbols to the executable and thus is probably
        only useable for executables compiled using gcc.

        The file is read and scanned in chunks of chunksize bytes.
    """
    if hasattr(os.path, 'realpath'):
        # Python 2.2 introduced os.path.realpath(); it is used
        # here to work around problems with Cygwin not being
        # able to open symlinks for reading
        executable = os.path.realpath(executable)
    # Use a context manager so the binary is closed even if scanning
    # raises (the previous code leaked the file object on error).
    with open(executable, 'rb') as f:
        binary = f.read(chunksize)
        pos = 0
        while 1:
            if b'libc' in binary or b'GLIBC' in binary:
                m = _libc_search.search(binary, pos)
            else:
                m = None
            if not m:
                # NOTE(review): a symbol straddling a chunk boundary is
                # missed; kept as-is to preserve the original scanning
                # behaviour.
                binary = f.read(chunksize)
                if not binary:
                    break
                pos = 0
                continue
            # Decode matched groups; latin1 maps bytes 1:1 to str.
            libcinit, glibc, glibcversion, so, threads, soversion = [
                s.decode('latin1') if s is not None else s
                for s in m.groups()]
            if libcinit and not lib:
                lib = 'libc'
            elif glibc:
                if lib != 'glibc':
                    lib = 'glibc'
                    version = glibcversion
                elif glibcversion > version:
                    # Keep the highest GLIBC version seen.
                    version = glibcversion
            elif so:
                if lib != 'glibc':
                    lib = 'libc'
                    if soversion and soversion > version:
                        version = soversion
                    if threads and version[-len(threads):] != threads:
                        version = version + threads
            pos = m.end()
    return lib, version
def _dist_try_harder(distname, version, id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
distname = 'SuSE'
for line in open('/var/adm/inst-log/info'):
tv = line.split()
if len(tv) == 2:
tag, value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = value.strip()
elif tag == 'DIST_IDENT':
values = value.split('-')
id = values[2]
return distname, version, id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
for line in open('/etc/.installed'):
pkg = line.split('-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux', pkg[1], id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname, version, id
return distname, version, id
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux', 'arch', 'mageia')
def _parse_release_file(firstline):
    """Parse the first line of a distribution release file.

    Returns a (distname, version, id) triple; distname is '' when the
    line does not match a known release-file format, and version/id
    default to '' when they cannot be deduced (including for an empty
    line).
    """
    version = ''
    id = ''

    # LSB format: "distro release x.x (codename)"
    match = _lsb_release_version.match(firstline)
    if match is None:
        # Pre-LSB format: "distro x.x (codename)"
        match = _release_version.match(firstline)
    if match is not None:
        return tuple(match.groups())

    # Unknown format: fall back to the first two whitespace-separated
    # words as version and id.
    words = firstline.strip().split()
    if words:
        version = words[0]
        if len(words) > 1:
            id = words[1]
    return '', version, id
def linux_distribution(distname='', version='', id='',

                       supported_dists=_supported_dists,
                       full_distribution_name=1):

    """ Tries to determine the name of the Linux OS distribution name.

        The function first looks for a distribution release file in
        /etc and then reverts to _dist_try_harder() in case no
        suitable files are found.

        supported_dists may be given to define the set of Linux
        distributions to look for. It defaults to a list of currently
        supported Linux distributions identified by their release file
        name.

        If full_distribution_name is true (default), the full
        distribution read from the OS is returned. Otherwise the short
        name taken from supported_dists is used.

        Returns a tuple (distname, version, id) which default to the
        args given as parameters.
    """
    try:
        etc = os.listdir(_UNIXCONFDIR)
    except OSError:
        # Probably not a Unix system
        return distname, version, id
    # Sort so the release file picked is deterministic when several
    # candidates exist.
    etc.sort()
    for file in etc:
        m = _release_filename.match(file)
        if m is not None:
            _distname, dummy = m.groups()
            if _distname in supported_dists:
                distname = _distname
                break
    else:
        # No known release file found: fall back to heuristics.
        return _dist_try_harder(distname, version, id)

    # Read the first line
    with open(os.path.join(_UNIXCONFDIR, file), 'r',
              encoding='utf-8', errors='surrogateescape') as f:
        firstline = f.readline()
    _distname, _version, _id = _parse_release_file(firstline)

    # Prefer values parsed from the release file; keep the
    # caller-supplied defaults when parsing yielded nothing.
    if _distname and full_distribution_name:
        distname = _distname
    if _version:
        version = _version
    if _id:
        id = _id
    return distname, version, id
# To maintain backwards compatibility:
def dist(distname='', version='', id='',

         supported_dists=_supported_dists):
    """Backwards-compatible wrapper around linux_distribution().

    Identical to linux_distribution() except that the short
    distribution name taken from supported_dists is returned instead
    of the full name read from the OS release file.

    Returns a tuple (distname, version, id) which default to the args
    given as parameters.
    """
    return linux_distribution(distname, version, id,
                              supported_dists=supported_dists,
                              full_distribution_name=0)
def popen(cmd, mode='r', bufsize=-1):

    """Portable popen() interface (deprecated).

    Kept for backwards compatibility only: emits a DeprecationWarning
    and forwards the call to os.popen() unchanged.
    """
    import warnings
    warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2)
    return os.popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = version.split('.')
if build:
l.append(build)
try:
ints = map(int, l)
except ValueError:
strings = l
else:
strings = list(map(str, ints))
version = '.'.join(strings[:3])
return version
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',

               supported_platforms=('win32', 'win16', 'dos')):

    """ Tries to figure out the OS version used and returns
        a tuple (system, release, version).

        It uses the "ver" shell command for this which is known
        to exists on Windows, DOS. XXX Others too ?

        In case this fails, the given parameters are used as
        defaults.
    """
    if sys.platform not in supported_platforms:
        return system, release, version

    # Try some common cmd strings; the first one that runs and exits
    # cleanly wins.
    for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
        try:
            pipe = popen(cmd)
            info = pipe.read()
            if pipe.close():
                raise OSError('command failed')
            # XXX How can I suppress shell errors from being written
            #     to stderr ?
        except OSError as why:
            #print 'Command %s failed: %s' % (cmd, why)
            continue
        else:
            break
    else:
        # Every candidate command failed: keep the defaults.
        return system, release, version

    # Parse the output
    info = info.strip()
    m = _ver_output.match(info)
    if m is not None:
        system, release, version = m.groups()
        # Strip trailing dots from version and release
        if release[-1] == '.':
            release = release[:-1]
        if version[-1] == '.':
            version = version[:-1]
        # Normalize the version and build strings (eliminating additional
        # zeros)
        version = _norm_version(version)
    return system, release, version
def _win32_getvalue(key, name, default=''):

    """ Read a value for name from the registry key.

        In case this fails, default is returned.
    """
    try:
        # Use win32api if available
        from win32api import RegQueryValueEx
    except ImportError:
        # On Python 2.0 and later, emulate using winreg
        import winreg
        RegQueryValueEx = winreg.QueryValueEx
    try:
        return RegQueryValueEx(key, name)
    except Exception:
        # BUGFIX: narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed; any registry
        # error still falls back to the caller-supplied default.
        return default
def win32_ver(release='', version='', csd='', ptype=''):

    """ Get additional version information from the Windows Registry
        and return a tuple (release, version, csd, ptype) referring to
        OS release, version number, CSD level (service pack), and OS
        type (multi/single processor).  (The previous docstring
        omitted the release element of the returned tuple.)

        As a hint: ptype returns 'Uniprocessor Free' on single
        processor NT machines and 'Multiprocessor Free' on multi
        processor machines. The 'Free' refers to the OS version being
        free of debugging code. It could also state 'Checked' which
        means the OS version uses debugging code, i.e. code that
        checks arguments, ranges, etc. (Thomas Heller).

        Note: this function works best with Mark Hammond's win32
        package installed, but also on Python 2.3 and later. It
        obviously only runs on Win32 compatible platforms.
    """
    # XXX Is there any way to find out the processor type on WinXX ?
    # XXX Is win32 available on Windows CE ?
    #
    # Adapted from code posted by Karl Putland to comp.lang.python.
    #
    # The mappings between reg. values and release names can be found
    # here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp

    # Import the needed APIs
    try:
        import win32api
        from win32api import RegQueryValueEx, RegOpenKeyEx, \
             RegCloseKey, GetVersionEx
        from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
             VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
    except ImportError:
        # Emulate the win32api module using Python APIs
        try:
            sys.getwindowsversion
        except AttributeError:
            # No emulation possible, so return the defaults...
            return release, version, csd, ptype
        else:
            # Emulation using winreg (added in Python 2.0) and
            # sys.getwindowsversion() (added in Python 2.3)
            import winreg
            GetVersionEx = sys.getwindowsversion
            RegQueryValueEx = winreg.QueryValueEx
            RegOpenKeyEx = winreg.OpenKeyEx
            RegCloseKey = winreg.CloseKey
            HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE
            VER_PLATFORM_WIN32_WINDOWS = 1
            VER_PLATFORM_WIN32_NT = 2
            VER_NT_WORKSTATION = 1
            VER_NT_SERVER = 3
            REG_SZ = 1

    # Find out the registry key and some general version infos
    winver = GetVersionEx()
    maj, min, buildno, plat, csd = winver
    # Mask the build number to 16 bits; higher bits carry flags.
    version = '%i.%i.%i' % (maj, min, buildno & 0xFFFF)
    if hasattr(winver, "service_pack"):
        if winver.service_pack != "":
            csd = 'SP%s' % winver.service_pack_major
    else:
        if csd[:13] == 'Service Pack ':
            csd = 'SP' + csd[13:]

    if plat == VER_PLATFORM_WIN32_WINDOWS:
        regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
        # Try to guess the release name
        if maj == 4:
            if min == 0:
                release = '95'
            elif min == 10:
                release = '98'
            elif min == 90:
                release = 'Me'
            else:
                release = 'postMe'
        elif maj == 5:
            release = '2000'

    elif plat == VER_PLATFORM_WIN32_NT:
        regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
        if maj <= 4:
            release = 'NT'
        elif maj == 5:
            if min == 0:
                release = '2000'
            elif min == 1:
                release = 'XP'
            elif min == 2:
                release = '2003Server'
            else:
                release = 'post2003'
        elif maj == 6:
            if hasattr(winver, "product_type"):
                product_type = winver.product_type
            else:
                product_type = VER_NT_WORKSTATION
                # Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
                # or help from the registry, we cannot properly identify
                # non-workstation versions.
                try:
                    key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
                    name, type = RegQueryValueEx(key, "ProductName")
                    # Discard any type that isn't REG_SZ
                    if type == REG_SZ and name.find("Server") != -1:
                        product_type = VER_NT_SERVER
                except OSError:
                    # Use default of VER_NT_WORKSTATION
                    pass

            if min == 0:
                if product_type == VER_NT_WORKSTATION:
                    release = 'Vista'
                else:
                    release = '2008Server'
            elif min == 1:
                if product_type == VER_NT_WORKSTATION:
                    release = '7'
                else:
                    release = '2008ServerR2'
            elif min == 2:
                if product_type == VER_NT_WORKSTATION:
                    release = '8'
                else:
                    release = '2012Server'
            else:
                release = 'post2012Server'

    else:
        if not release:
            # E.g. Win3.1 with win32s
            release = '%i.%i' % (maj, min)
        return release, version, csd, ptype

    # Open the registry key
    try:
        keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
        # Get a value to make sure the key exists...
        RegQueryValueEx(keyCurVer, 'SystemRoot')
    except:
        return release, version, csd, ptype

    # Parse values
    #subversion = _win32_getvalue(keyCurVer,
    #                             'SubVersionNumber',
    #                             ('',1))[0]
    #if subversion:
    #   release = release + subversion # 95a, 95b, etc.
    build = _win32_getvalue(keyCurVer,
                            'CurrentBuildNumber',
                            ('', 1))[0]
    ptype = _win32_getvalue(keyCurVer,
                            'CurrentType',
                            (ptype, 1))[0]

    # Normalize version
    version = _norm_version(version, build)

    # Close key
    RegCloseKey(keyCurVer)
    return release, version, csd, ptype
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
with open(fn, 'rb') as f:
pl = plistlib.load(f)
release = pl['ProductVersion']
versioninfo = ('', '', '')
machine = os.uname().machine
if machine in ('ppc', 'Power Macintosh'):
# Canonical name
machine = 'PowerPC'
return release, versioninfo, machine
def mac_ver(release='', versioninfo=('', '', ''), machine=''):

    """Get Mac OS version information.

    Returns (release, versioninfo, machine) with versioninfo being the
    tuple (version, dev_stage, non_release_version).  Entries which
    cannot be determined are set to the parameter values, which all
    default to ''.  All tuple entries are strings.
    """
    # The SystemVersion.plist XML file should always be present on a
    # Mac; try it first.
    xml_info = _mac_ver_xml()
    if xml_info is None:
        # Lookup failed: report the caller-supplied defaults.
        return release, versioninfo, machine
    return xml_info
def _java_getprop(name, default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):

    """ Version interface for Jython.

        Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being
        a tuple (vm_name, vm_release, vm_vendor) and osinfo being a
        tuple (os_name, os_version, os_arch).

        Values which cannot be determined are set to the defaults
        given as parameters (which all default to '').
    """
    # Only Jython provides the java.lang package; everywhere else the
    # caller-supplied defaults are returned unchanged.
    try:
        import java.lang
    except ImportError:
        return release, vendor, vminfo, osinfo

    vendor = _java_getprop('java.vendor', vendor)
    release = _java_getprop('java.version', release)

    vm_name, vm_release, vm_vendor = vminfo
    vminfo = (_java_getprop('java.vm.name', vm_name),
              _java_getprop('java.vm.version', vm_release),
              _java_getprop('java.vm.vendor', vm_vendor))

    os_name, os_version, os_arch = osinfo
    osinfo = (_java_getprop('java.os.name', os_name),
              _java_getprop('java.os.version', os_version),
              _java_getprop('java.os.arch', os_arch))

    return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system, release, version):

    """ Returns (system, release, version) aliased to common
        marketing names used for some systems.

        It also does some reordering of the information in some cases
        where it would otherwise cause confusion.
    """
    if system == 'Rhapsody':
        # Apple's BSD derivative; fold the release into the system
        # name.  XXX How can we determine the marketing release number?
        return 'MacOS X Server', system + release, version

    if system == 'SunOS':
        if release < '5':
            # These releases really were marketed as SunOS.
            return system, release, version
        # Marketing release = SunOS release - 3 (e.g. 5.8 -> 2.8).
        parts = release.split('.')
        if parts:
            try:
                parts[0] = str(int(parts[0]) - 3)
            except ValueError:
                pass
            else:
                release = '.'.join(parts)
        # Both pre- and post-'6' marketing names map to Solaris.
        return 'Solaris', release, version

    if system == 'IRIX64':
        # IRIX reports IRIX64 on platforms with 64-bit support; yet it
        # is really a version and not a different platform, since
        # 32-bit apps are also supported.
        version = version + ' (64bit)' if version else '64bit'
        return 'IRIX', release, version

    if system in ('win32', 'win16'):
        # In case one of the other tricks
        return 'Windows', release, version

    return system, release, version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ', '_')
platform = platform.replace('/', '-')
platform = platform.replace('\\', '-')
platform = platform.replace(':', '-')
platform = platform.replace(';', '-')
platform = platform.replace('"', '-')
platform = platform.replace('(', '-')
platform = platform.replace(')', '-')
# No need to report 'unknown' information...
platform = platform.replace('unknown', '')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--', '-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except OSError:
# Still not working...
return default
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath), os.readlink(filepath)))
return filepath
def _syscmd_uname(option, default=''):

    """ Interface to the system's uname command.

        Runs "uname <option>" and returns its stripped output, or
        *default* when the command is unavailable or fails.
    """
    if sys.platform in ('dos', 'win32', 'win16'):
        # No uname command on these platforms.  XXX Others too ?
        return default
    try:
        f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
    except (AttributeError, OSError):
        return default
    output = f.read().strip()
    rc = f.close()
    # A non-zero exit status or empty output both mean failure.
    return default if rc or not output else output
def _syscmd_file(target, default=''):

    """ Interface to the system's file command.

        Symlinks in *target* are followed first.  Returns the raw
        output of "file <target>" (which includes the filename), or
        *default* when the command is unavailable or fails.
    """
    if sys.platform in ('dos', 'win32', 'win16'):
        # No file command on these platforms.  XXX Others too ?
        return default
    target = _follow_symlinks(target)
    try:
        proc = subprocess.Popen(['file', target],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except (AttributeError, OSError):
        return default
    output = proc.communicate()[0].decode('latin-1')
    rc = proc.wait()
    # Treat non-zero exit status or empty output as failure.
    return default if rc or not output else output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('', 'WindowsPE'),
'win16': ('', 'Windows'),
'dos': ('', 'MSDOS'),
}
def architecture(executable=sys.executable, bits='', linkage=''):

    """ Queries the given executable (defaults to the Python interpreter
        binary) for various architecture information.

        Returns a tuple (bits, linkage) which contains information about
        the bit architecture and the linkage format used for the
        executable. Both values are returned as strings.

        Values that cannot be determined are returned as given by the
        parameter presets. If bits is given as '', the sizeof(pointer)
        (or sizeof(long) on Python version < 1.5.2) is used as
        indicator for the supported pointer size.

        The function relies on the system's "file" command to do the
        actual work. This is available on most if not all Unix
        platforms. On some non-Unix platforms where the "file" command
        does not exist and the executable is set to the Python interpreter
        binary defaults from _default_architecture are used.
    """
    # Use the sizeof(pointer) as default number of bits if nothing
    # else is given as default.
    if not bits:
        import struct
        try:
            size = struct.calcsize('P')
        except struct.error:
            # Older installations can only query longs
            size = struct.calcsize('l')
        bits = str(size*8) + 'bit'

    # Get data from the 'file' system command
    if executable:
        fileout = _syscmd_file(executable, '')
    else:
        fileout = ''

    if not fileout and \
       executable == sys.executable:
        # "file" command did not return anything; we'll try to provide
        # some sensible defaults then...
        if sys.platform in _default_architecture:
            b, l = _default_architecture[sys.platform]
            if b:
                bits = b
            if l:
                linkage = l
        return bits, linkage

    if 'executable' not in fileout:
        # Format not supported
        return bits, linkage

    # Bits: determined from substrings of the 'file' output.
    if '32-bit' in fileout:
        bits = '32bit'
    elif 'N32' in fileout:
        # On Irix only
        bits = 'n32bit'
    elif '64-bit' in fileout:
        bits = '64bit'

    # Linkage: also determined from substrings of the 'file' output.
    # Note that the 'PE' check must come after 'ELF' since the latter
    # is the more specific match.
    if 'ELF' in fileout:
        linkage = 'ELF'
    elif 'PE' in fileout:
        # E.g. Windows uses this format
        if 'Windows' in fileout:
            linkage = 'WindowsPE'
        else:
            linkage = 'PE'
    elif 'COFF' in fileout:
        linkage = 'COFF'
    elif 'MS-DOS' in fileout:
        linkage = 'MSDOS'
    else:
        # XXX the A.OUT format also falls under this class...
        pass

    return bits, linkage
### Portable uname() interface
uname_result = collections.namedtuple("uname_result",
"system node release version machine processor")
_uname_cache = None
def uname():

    """ Fairly portable uname interface. Returns a tuple
        of strings (system, node, release, version, machine, processor)
        identifying the underlying platform.

        Note that unlike the os.uname function this also returns
        possible processor information as an additional tuple entry.

        Entries which cannot be determined are set to ''.

    """
    global _uname_cache
    no_os_uname = 0

    # Return the memoized result if we have one.
    if _uname_cache is not None:
        return _uname_cache

    processor = ''

    # Get some infos from the builtin os.uname API...
    try:
        system, node, release, version, machine = os.uname()
    except AttributeError:
        # os.uname() does not exist on this platform (e.g. Windows).
        no_os_uname = 1

    # NOTE: when no_os_uname is true the short-circuit below keeps the
    # unbound system/node/... names from being evaluated.
    if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
        # Hmm, no there is either no uname or uname has returned
        #'unknowns'... we'll have to poke around the system then.
        if no_os_uname:
            system = sys.platform
            release = ''
            version = ''
            node = _node()
            machine = ''

        use_syscmd_ver = 1

        # Try win32_ver() on win32 platforms
        if system == 'win32':
            release, version, csd, ptype = win32_ver()
            if release and version:
                use_syscmd_ver = 0
            # Try to use the PROCESSOR_* environment variables
            # available on Win XP and later; see
            # http://support.microsoft.com/kb/888731 and
            # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
            if not machine:
                # WOW64 processes mask the native architecture
                if "PROCESSOR_ARCHITEW6432" in os.environ:
                    machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
                else:
                    machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
            if not processor:
                processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)

        # Try the 'ver' system command available on some
        # platforms
        if use_syscmd_ver:
            system, release, version = _syscmd_ver(system)
            # Normalize system to what win32_ver() normally returns
            # (_syscmd_ver() tends to return the vendor name as well)
            if system == 'Microsoft Windows':
                system = 'Windows'
            elif system == 'Microsoft' and release == 'Windows':
                # Under Windows Vista and Windows Server 2008,
                # Microsoft changed the output of the ver command. The
                # release is no longer printed. This causes the
                # system and release to be misidentified.
                system = 'Windows'
                if '6.0' == version[:3]:
                    release = 'Vista'
                else:
                    release = ''

        # In case we still don't know anything useful, we'll try to
        # help ourselves
        if system in ('win32', 'win16'):
            if not version:
                if system == 'win32':
                    version = '32bit'
                else:
                    version = '16bit'
            system = 'Windows'

        elif system[:4] == 'java':
            release, vendor, vminfo, osinfo = java_ver()
            system = 'Java'
            version = ', '.join(vminfo)
            if not version:
                version = vendor

    # System specific extensions
    if system == 'OpenVMS':
        # OpenVMS seems to have release and version mixed up
        if not release or release == '0':
            release = version
            version = ''
        # Get processor information
        try:
            import vms_lib
        except ImportError:
            pass
        else:
            csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0)
            if (cpu_number >= 128):
                processor = 'Alpha'
            else:
                processor = 'VAX'
    if not processor:
        # Get processor information from the uname system command
        processor = _syscmd_uname('-p', '')

    #If any unknowns still exist, replace them with ''s, which are more portable
    if system == 'unknown':
        system = ''
    if node == 'unknown':
        node = ''
    if release == 'unknown':
        release = ''
    if version == 'unknown':
        version = ''
    if machine == 'unknown':
        machine = ''
    if processor == 'unknown':
        processor = ''

    #  normalize name
    if system == 'Microsoft' and release == 'Windows':
        system = 'Windows'
        release = 'Vista'

    # Cache and return the assembled result.
    _uname_cache = uname_result(system, node, release, version,
                                machine, processor)
    return _uname_cache
### Direct interfaces to some of the uname() return values
def system():

    """Name of the OS/system, e.g. 'Linux', 'Windows' or 'Java'.

    Falls back to an empty string when the name cannot be determined.
    """
    return uname()[0]
def node():

    """Network name of this machine (possibly not fully qualified).

    Falls back to an empty string when the name cannot be determined.
    """
    info = uname()
    return info.node
def release():

    """OS release string, e.g. '2.2.0' or 'NT'.

    Falls back to an empty string when the value cannot be determined.
    """
    return uname()[2]
def version():

    """OS release version, e.g. '#3 on degas'.

    Falls back to an empty string when the value cannot be determined.
    """
    info = uname()
    return info.version
def machine():

    """Machine type, e.g. 'i386'.

    Falls back to an empty string when the value cannot be determined.
    """
    return uname()[4]
def processor():

    """(True) processor name, e.g. 'amdk6'.

    Falls back to an empty string when the value cannot be
    determined. Many platforms provide nothing better than the
    machine() value here (NetBSD, for instance).
    """
    info = uname()
    return info.processor
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[([^\]]+)\]?', re.ASCII)
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
'([\d\.]+)'
'(?: \(([\d\.]+)\))?'
' on (.NET [\d\.]+)', re.ASCII)
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
r'([\d.]+)\s*'
'\(IronPython\s*'
'[\d.]+\s*'
'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):

    """ Returns a parsed version of Python's sys.version as tuple
        (name, version, branch, revision, buildno, builddate, compiler)
        referring to the Python implementation name, version, branch,
        revision, build number, build date/time as string and the compiler
        identification string.

        Note that unlike the Python sys.version, the returned value
        for the Python version will always include the patchlevel (it
        defaults to '.0').

        The function returns empty strings for tuple entries that
        cannot be determined.

        sys_version may be given to parse an alternative version
        string, e.g. if the version was read from a different Python
        interpreter.

    """
    # Get the Python version
    if sys_version is None:
        sys_version = sys.version

    # Try the cache first
    result = _sys_version_cache.get(sys_version, None)
    if result is not None:
        return result

    # Parse it: dispatch on implementation-specific banner markers.
    if 'IronPython' in sys_version:
        # IronPython
        name = 'IronPython'
        if sys_version.startswith('IronPython'):
            # Pre-2.6 banner starts with the implementation name.
            match = _ironpython_sys_version_parser.match(sys_version)
        else:
            match = _ironpython26_sys_version_parser.match(sys_version)

        if match is None:
            raise ValueError(
                'failed to parse IronPython sys.version: %s' %
                repr(sys_version))

        version, alt_version, compiler = match.groups()
        buildno = ''
        builddate = ''

    elif sys.platform.startswith('java'):
        # Jython
        name = 'Jython'
        match = _sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError(
                'failed to parse Jython sys.version: %s' %
                repr(sys_version))
        version, buildno, builddate, buildtime, _ = match.groups()
        compiler = sys.platform

    elif "PyPy" in sys_version:
        # PyPy
        name = "PyPy"
        match = _pypy_sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError("failed to parse PyPy sys.version: %s" %
                             repr(sys_version))
        version, buildno, builddate, buildtime = match.groups()
        compiler = ""

    else:
        # CPython
        match = _sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError(
                'failed to parse CPython sys.version: %s' %
                repr(sys_version))
        version, buildno, builddate, buildtime, compiler = \
            match.groups()
        name = 'CPython'
        builddate = builddate + ' ' + buildtime

    # Branch/revision come from interpreter-provided VCS metadata.
    if hasattr(sys, '_mercurial'):
        _, branch, revision = sys._mercurial
    elif hasattr(sys, 'subversion'):
        # sys.subversion was added in Python 2.5
        _, branch, revision = sys.subversion
    else:
        branch = ''
        revision = ''

    # Add the patchlevel version if missing
    l = version.split('.')
    if len(l) == 2:
        l.append('0')
        version = '.'.join(l)

    # Build and cache the result
    result = (name, version, branch, revision, buildno, builddate, compiler)
    _sys_version_cache[sys_version] = result
    return result
def python_implementation():

    """Identify the running Python implementation.

    One of: 'CPython' (C implementation of Python),
    'IronPython' (.NET implementation of Python),
    'Jython' (Java implementation of Python),
    'PyPy' (Python implementation of Python).
    """
    impl, *_rest = _sys_version()
    return impl
def python_version():

    """Python version as 'major.minor.patchlevel'.

    Unlike sys.version, the patchlevel is always present
    (defaulting to 0).
    """
    info = _sys_version()
    return info[1]
def python_version_tuple():

    """Python version as a tuple of strings (major, minor, patchlevel).

    Unlike sys.version, the patchlevel is always present
    (defaulting to 0).
    """
    parts = _sys_version()[1].split('.')
    return tuple(parts)
def python_branch():

    """VCS branch the interpreter was built from.

    For CPython this is the Subversion branch of the build;
    '' when the information is unavailable.
    """
    return _sys_version()[2]
def python_revision():

    """VCS revision the interpreter was built from.

    For CPython this is the Subversion revision of the build;
    '' when the information is unavailable.
    """
    info = _sys_version()
    return info[3]
def python_build():

    """Build metadata as a (buildno, builddate) pair of strings."""
    info = _sys_version()
    return info[4:6]
def python_compiler():

    """Identification string of the compiler used to build Python."""
    return _sys_version()[6]
### The Opus Magnum of platform strings :-)

# Memo for platform(), keyed on the (aliased, terse) flag pair.
_platform_cache = {}

def platform(aliased=0, terse=0):

    """ Returns a single string identifying the underlying platform
        with as much useful information as possible (but no more :).

        The output is intended to be human readable rather than
        machine parseable. It may look different on different
        platforms and this is intended.

        If "aliased" is true, the function will use aliases for
        various platforms that report system names which differ from
        their common names, e.g. SunOS will be reported as
        Solaris. The system_alias() function is used to implement
        this.

        Setting terse to true causes the function to return only the
        absolute minimum information needed to identify the platform.

    """
    result = _platform_cache.get((aliased, terse), None)
    if result is not None:
        return result

    # Get uname information and then apply platform specific cosmetics
    # to it...
    system, node, release, version, machine, processor = uname()
    if machine == processor:
        # Redundant processor info adds nothing; drop it.
        processor = ''
    if aliased:
        system, release, version = system_alias(system, release, version)

    if system == 'Windows':
        # MS platforms
        rel, vers, csd, ptype = win32_ver(version)
        if terse:
            platform = _platform(system, release)
        else:
            platform = _platform(system, release, version, csd)

    elif system in ('Linux',):
        # Linux based systems
        distname, distversion, distid = dist('')
        if distname and not terse:
            platform = _platform(system, release, machine, processor,
                                 'with',
                                 distname, distversion, distid)
        else:
            # If the distribution name is unknown check for libc vs. glibc
            libcname, libcversion = libc_ver(sys.executable)
            platform = _platform(system, release, machine, processor,
                                 'with',
                                 libcname+libcversion)

    elif system == 'Java':
        # Java platforms
        r, v, vminfo, (os_name, os_version, os_arch) = java_ver()
        if terse or not os_name:
            platform = _platform(system, release, version)
        else:
            platform = _platform(system, release, version,
                                 'on',
                                 os_name, os_version, os_arch)

    elif system == 'MacOS':
        # MacOS platforms
        if terse:
            platform = _platform(system, release)
        else:
            platform = _platform(system, release, machine)

    else:
        # Generic handler
        if terse:
            platform = _platform(system, release)
        else:
            bits, linkage = architecture(sys.executable)
            platform = _platform(system, release, machine,
                                 processor, bits, linkage)

    _platform_cache[(aliased, terse)] = platform
    return platform
### Command line interface

if __name__ == '__main__':
    # Default is to print the aliased verbose platform string;
    # 'terse'/'--terse' and 'nonaliased'/'--nonaliased' flip the flags.
    terse = ('terse' in sys.argv or '--terse' in sys.argv)
    # PEP 8: prefer "x not in seq" over "not x in seq".
    aliased = ('nonaliased' not in sys.argv and
               '--nonaliased' not in sys.argv)
    print(platform(aliased, terse))
    sys.exit(0)
| gpl-2.0 |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Mac/Demo/imgbrowse/mac_image.py | 34 | 1777 | """mac_image - Helper routines (hacks) for images"""
import imgformat
from Carbon import Qd
import struct
import MacOS
# Map imgformat constants to Mac PixMap field values
# (pixelType, pixelSize, cmpCount, cmpSize) -- TODO confirm field order.
_fmt_to_mac = {
    imgformat.macrgb16 : (16, 16, 3, 5),
}
def mkpixmap(w, h, fmt, data):
    """kludge a pixmap together

    Packs a Mac PixMap structure by hand for a w x h image whose pixel
    data lives in the Python string ``data``. ``fmt`` must be a key of
    _fmt_to_mac. Returns a Qd.RawBitMap wrapping the packed struct.
    """
    fmtinfo = _fmt_to_mac[fmt]

    rv = struct.pack("lHhhhhhhlllhhhhlll",
        # Address of the string's buffer, derived from its object id --
        # relies on CPython object layout, hence the HACK note.
        id(data)+MacOS.string_id_to_buffer, # HACK HACK!!
        # rowBytes with the high bit set to flag a PixMap (not BitMap).
        w*2 + 0x8000,
        0, 0, h, w,
        0,
        0, 0, # XXXX?
        # 72 dpi horizontal/vertical resolution as 16.16 fixed point.
        72<<16, 72<<16,
        fmtinfo[0], fmtinfo[1],
        fmtinfo[2], fmtinfo[3],
        0, 0, 0)
##    print 'Our pixmap, size %d:'%len(rv)
##    dumppixmap(rv)
    return Qd.RawBitMap(rv)
def dumppixmap(data):
    """Debug helper: decode and print a packed PixMap, then hex-dump it."""
    # NOTE(review): mkpixmap packs with "lHhhhhhhlllhhhhlll" (unsigned H
    # for rowBytes) but this unpacks with "lhhhhhhhlllhhhhlll" (signed h)
    # -- confirm the sign mismatch on rowBytes is intentional.
    baseAddr, \
        rowBytes, \
        t, l, b, r, \
        pmVersion, \
        packType, packSize, \
        hRes, vRes, \
        pixelType, pixelSize, \
        cmpCount, cmpSize, \
        planeBytes, pmTable, pmReserved \
        = struct.unpack("lhhhhhhhlllhhhhlll", data)
    print 'Base: 0x%x'%baseAddr
    # Mask off the high "this is a PixMap" flag bit when showing rowBytes.
    print 'rowBytes: %d (0x%x)'%(rowBytes&0x3fff, rowBytes)
    print 'rect: %d, %d, %d, %d'%(t, l, b, r)
    print 'pmVersion: 0x%x'%pmVersion
    print 'packing: %d %d'%(packType, packSize)
    # Resolutions are 16.16 fixed point; convert to float dpi.
    print 'resolution: %f x %f'%(float(hRes)/0x10000, float(vRes)/0x10000)
    print 'pixeltype: %d, size %d'%(pixelType, pixelSize)
    print 'components: %d, size %d'%(cmpCount, cmpSize)
    print 'planeBytes: %d (0x%x)'%(planeBytes, planeBytes)
    print 'pmTable: 0x%x'%pmTable
    print 'pmReserved: 0x%x'%pmReserved
    # Raw hex dump, 16 bytes per row.
    for i in range(0, len(data), 16):
        for j in range(16):
            if i + j < len(data):
                print '%02.2x'%ord(data[i+j]),
        print
| gpl-3.0 |
leilihh/nova | nova/objects/quotas.py | 11 | 3927 | # Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import base
from nova.objects import fields
from nova import quota
def ids_from_instance(context, instance):
    """Pick the (project_id, user_id) pair quota operations should use.

    Defaults to the request context's ids; an admin acting on another
    project's instance uses the instance's project id, and any mismatch
    on user id defers to the instance's user id.
    """
    project_id = context.project_id
    if context.is_admin and instance['project_id'] != context.project_id:
        project_id = instance['project_id']

    user_id = context.user_id
    if instance['user_id'] != context.user_id:
        user_id = instance['user_id']

    return project_id, user_id
# TODO(lyj): This method needs to be cleaned up once the
# ids_from_instance helper method is renamed or some common
# method is added for objects.quotas.
def ids_from_security_group(context, security_group):
    # Security groups carry the same 'project_id'/'user_id' keys as
    # instances, so the instance helper applies unchanged.
    return ids_from_instance(context, security_group)
class Quotas(base.NovaObject):
    # Versioned-object field declarations; all nullable so an empty
    # (no-op) quota object is representable.
    fields = {
        'reservations': fields.ListOfStringsField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
    }

    def __init__(self):
        super(Quotas, self).__init__()
        # Set up defaults.
        self.reservations = []
        self.project_id = None
        self.user_id = None
        self.obj_reset_changes()

    @classmethod
    def from_reservations(cls, context, reservations, instance=None):
        """Transitional for compatibility.

        Wrap an already-made list of reservation ids in a Quotas object
        so it can later be committed or rolled back. When an instance is
        given, its project/user ids are recorded (see ids_from_instance).
        """
        if instance is None:
            project_id = None
            user_id = None
        else:
            project_id, user_id = ids_from_instance(context, instance)
        quotas = cls()
        quotas._context = context
        quotas.reservations = reservations
        quotas.project_id = project_id
        quotas.user_id = user_id
        quotas.obj_reset_changes()
        return quotas

    @base.remotable
    def reserve(self, context, expire=None, project_id=None, user_id=None,
                **deltas):
        # Ask the quota engine for reservations covering ``deltas`` and
        # remember them (plus the ids) for a later commit/rollback.
        reservations = quota.QUOTAS.reserve(context, expire=expire,
                                            project_id=project_id,
                                            user_id=user_id,
                                            **deltas)
        self.reservations = reservations
        self.project_id = project_id
        self.user_id = user_id
        self.obj_reset_changes()

    @base.remotable
    def commit(self, context=None):
        # No-op when nothing was reserved.
        if not self.reservations:
            return
        if context is None:
            context = self._context
        quota.QUOTAS.commit(context, self.reservations,
                            project_id=self.project_id,
                            user_id=self.user_id)
        # Reservations are single-use; clear them once consumed.
        self.reservations = None
        self.obj_reset_changes()

    @base.remotable
    def rollback(self, context=None):
        """Rollback quotas."""
        if not self.reservations:
            return
        if context is None:
            context = self._context
        quota.QUOTAS.rollback(context, self.reservations,
                              project_id=self.project_id,
                              user_id=self.user_id)
        self.reservations = None
        self.obj_reset_changes()
class QuotasNoOp(Quotas):
    # Drop-in replacement for Quotas that performs no quota accounting.
    # NOTE(review): reserve() omits ``self``; when invoked as an instance
    # method the instance binds to ``context``. Every argument is ignored
    # anyway, but confirm this is deliberate before "fixing" the signature.
    def reserve(context, expire=None, project_id=None, user_id=None,
                **deltas):
        pass

    def commit(self, context=None):
        pass

    def rollback(self, context=None):
        pass
| apache-2.0 |
cit563emef2dasdme/jklasjdf12nfasfdkl | obtaning_pmc_oai.py | 1 | 22988 | from flask_script import Manager
from flask import Flask
import os
from lxml import etree
from app.models import db, Literature, Author
from app.models import AuthorLiterature, Cite, Citememe
from datetime import datetime
import re
import spacy
# Directory containing this file; anchors the SQLite path below.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# Development database: a SQLite file next to this script.
app.config['SQLALCHEMY_DATABASE_URI'] = \
    'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
manager = Manager(app)
# Namespace prefix bound to the JATS <article> default namespace in xpath queries.
article_namespace = "article"
# spaCy English model, used for sentence segmentation of paragraphs.
nlp = spacy.load("en")
# local version
def obtaning_data(infile):
    """Parse one PMC/JATS XML file and load it into the database.

    Extracts the article's ids, journal info, title, publication date,
    affiliations, authors, references and citation sentences, creating
    the corresponding ORM rows, then commits the session. Returns early
    (without changes) when the article is already present.

    FIX: string emptiness was previously tested with ``is not ""``
    (identity, not equality -- CPython warns and it only works by
    accident of string interning); replaced with ``!= ""``.
    """
    # First pass: collect the document's namespace declarations so the
    # default namespace gets the 'article' prefix for xpath queries.
    nsmap = {}
    for event, elem in etree.iterparse(infile, events=('start-ns',)):
        ns, url = elem
        if ns == "":
            ns = article_namespace
        nsmap[ns] = url
    # print(nsmap)
    parser = etree.XMLParser(ns_clean=True)
    tree = etree.parse(infile, parser)
    root = tree.getroot()
    literature = Literature()
    # To find the pmc_id
    pmc_uid = article_id_find(root, "pmc-uid", nsmap)
    # print(pmc_uid)
    temp_literature = Literature.query.filter_by(pmc_uid=pmc_uid).first()
    if temp_literature:
        print("Literature has already been updated from pmc, " +
              "and the pmc_uid is {0}, the program would not proceed."
              .format(pmc_uid))
        # print("Testing, stop return.")
        return
    # To find the pmid
    pmid = article_id_find(root, "pmid", nsmap)
    # print(pmid)
    # Literature already been added as a reference.
    # The task next is to update the info for the original reference.
    temp_literature = Literature.query.filter_by(pmid=pmid).first()
    if temp_literature:
        print(
            "Literature already been added as a reference, " +
            "and the pmid is {0}, and the literature will be updated by pmc data.".format(
                temp_literature.pmid
            )
        )
        temp_literature.delete_by_pmid(pmid)
    literature.pmid = pmid
    literature.pmc_uid = pmc_uid
    # The default updated value for PMC literature is True
    literature.updated = True
    # The default type value for PMC literature is journal
    literature.type = "journal"
    # To find the journal id
    temp_journal_id_list = root.xpath(
        "//{0}:journal-id[@journal-id-type=\'nlm-ta\']".format(
            article_namespace),
        namespaces=nsmap)
    if temp_journal_id_list:
        journal_id = temp_journal_id_list[0].text
        # print(journal_id)
        literature.source_id = journal_id
    # To find the journal title
    temp_journal_title_list = root.xpath(
        "//{0}:journal-title".format(article_namespace), namespaces=nsmap)
    if temp_journal_title_list:
        journal_title = temp_journal_title_list[0].text
        # print(journal_title)
        literature.source_title = journal_title
    # To find the doi
    doi = article_id_find(root, "doi", nsmap)
    # print(doi)
    literature.doi = doi
    # To obtain the title (text plus the tails of any inline markup)
    temp_title_list = root.xpath(
        "//{0}:title-group/{0}:article-title".format(article_namespace),
        namespaces=nsmap)
    if temp_title_list:
        temp_title = temp_title_list[0]
        title = temp_title.text + ''.join(tag.tail for tag in temp_title)
    else:
        title = None
    # print(title)
    literature.title = title
    # To obtain the epub_date; missing day/month default to 1.
    temp_epub_date_list = root.xpath(
        "//{0}:pub-date[@pub-type=\'epub\']".format(article_namespace),
        namespaces=nsmap)
    if temp_epub_date_list:
        day = 1
        month = 1
        epub_date_etree = etree.ElementTree(temp_epub_date_list[0])
        temp_day_list = epub_date_etree.xpath(
            "{0}:day".format(article_namespace), namespaces=nsmap)
        if temp_day_list:
            day = int(temp_day_list[0].text)
        temp_month_list = epub_date_etree.xpath(
            "{0}:month".format(article_namespace), namespaces=nsmap)
        if temp_month_list:
            month = int(temp_month_list[0].text)
        year_text = epub_date_etree.xpath(
            "{0}:year".format(article_namespace), namespaces=nsmap)[0].text
        year = int(year_text)
        epub_date = datetime(year, month, day)
        # print(epub_date)
    else:
        epub_date = None
    literature.pub_date = epub_date
    # To obtain the affiliations
    # NOTE(review): Affiliation is not among this module's imports --
    # confirm it is available at runtime before this branch executes.
    affiliations_pair = {}
    corresp_text = "corresp"
    temp_affiliations_list = root.xpath(
        "//{0}:aff".format(article_namespace), namespaces=nsmap)
    for i, temp_affiliation in enumerate(temp_affiliations_list):
        temp_aff_name_text = temp_affiliation.text
        if (temp_aff_name_text is not None) and (temp_aff_name_text.strip() != ""):
            # Plain-text affiliation: strip trailing ' and' / ','.
            aff_name = re.sub(r' and\s*$|,\s*$', '', temp_aff_name_text)
            affiliation_corresp = Affiliation(name=aff_name)
            affiliations_pair[corresp_text] = affiliation_corresp
        else:
            # Structured affiliation: scan child elements' text/tails.
            affiliation_tree = etree.ElementTree(temp_affiliation)
            temp_affiliation_elements = affiliation_tree.xpath(
                "{0}:*".format(article_namespace), namespaces=nsmap)
            for j, affiliation_elem in enumerate(temp_affiliation_elements):
                temp_aff_name_text = affiliation_elem.tail
                if (temp_aff_name_text is not None) and \
                        (temp_aff_name_text.strip() != ""):
                    aff_name = re.sub(r' and\s*$|,\s*$', '',
                                      temp_aff_name_text)
                    affiliation = Affiliation(name=aff_name)
                    affiliation_key = temp_affiliation.attrib['id']
                    affiliations_pair[affiliation_key] = affiliation
                else:
                    temp_aff_name_text = affiliation_elem.text
                    if (temp_aff_name_text is not None) and (temp_aff_name_text.strip() != ""):
                        aff_name = re.sub(r' and\s*$|,\s*$', '',
                                          temp_aff_name_text)
                        affiliation = Affiliation(name=aff_name)
                        affiliation_key = temp_affiliation.attrib['id']
                        affiliations_pair[affiliation_key] = affiliation
    del temp_affiliations_list
    # To get the authors' data, including affiliations
    target_tag = 'contrib'
    obtain_data(target_tag, nsmap, infile, obtain_author, affiliations_pair,
                literature)
    # To obtain the references: detect which citation element the file
    # uses (e.g. element-citation vs mixed-citation), skipping <label>.
    # NOTE(review): citation_tag stays unbound when no <ref> children
    # exist -- obtain_data below would then raise NameError; confirm.
    citation_type_list = root.xpath(
        "//{0}:back/{0}:ref-list/{0}:ref/{0}:*".format(article_namespace),
        namespaces=nsmap
    )
    for citation_type in citation_type_list:
        if citation_type.tag == "{" + nsmap[article_namespace] + "}label":
            continue
        citation_tag = citation_type.tag
        citation_tag = citation_tag.replace("{" + nsmap[article_namespace] + "}", "")
        # print(citation_tag)
        break
    del citation_type_list
    # To obtain the reference info
    target_tag = 'ref'
    obtain_data(target_tag, nsmap, infile, obtain_reference,
                citation_tag, literature)
    # To obtain the citememes
    target_tag = 'p'
    obtain_data(target_tag, nsmap, infile, obtain_citememe, literature)
    db.session.commit()
def article_id_find(tree, article_id_type, nsmap):
    """Return the text of the <article-id> with the given pub-id-type.

    Returns None when no matching element exists in *tree*.
    """
    path = "//{0}:article-id[@pub-id-type='{1}']".format(
        article_namespace, article_id_type)
    # print(path)
    hits = tree.xpath(path, namespaces=nsmap)
    return hits[0].text if hits else None
# This the general method to obtain data (e.g. citememe, reference, author)
def obtain_data(target_tag, namespace, infile, obtain_method, *arguments):
    """Stream-parse *infile* and feed matching end-events to *obtain_method*.

    The tag is qualified with the document's article namespace URL;
    *arguments* are forwarded unchanged to the handler.
    """
    qualified_tag = '{{{0}}}{1}'.format(namespace[article_namespace],
                                        target_tag)
    parse_context = etree.iterparse(infile, events=('end',),
                                    tag=qualified_tag)
    obtain_method(parse_context, namespace, *arguments)
# To obtain the citememes
def obtain_citememe(context, nsmap, *arguments):
    """For each body paragraph, tag citation markers inline, split into
    sentences with spaCy, and store one Citememe row per (sentence,
    cited reference) pair. ``arguments[0]`` is the citing Literature.
    """
    citer = arguments[0]
    # Matches paragraphs that live inside <body> (skips abstract etc.).
    body_parent_xpath = etree.XPath(
        "ancestor::{0}:body".format(article_namespace),
        namespaces=nsmap
    )
    # Bibliographic cross-references inside a paragraph.
    bibr_xpath = etree.XPath(
        "{0}:xref[@ref-type=\'bibr\']".format(article_namespace),
        namespaces=nsmap
    )
    for event, elem in context:
        # Only within the tag <body>, if not continue
        if len(body_parent_xpath(elem)) == 0:
            continue
        bibr_list = bibr_xpath(elem)
        former_bibr = None
        former_bibr_seq = ""
        for bibr in bibr_list:
            # print(bibr.attrib["rid"])
            # Drop the visible citation label; markers go in the tail.
            bibr.text = ""
            # Range citation like "[3]-[7]": expand the intermediate
            # sequence numbers onto the previous marker's tail.
            if (former_bibr is not None) and (re.match("~.*?~[-–]", former_bibr.tail)):
                former_bibr.tail = re.sub("[-–]", "", former_bibr.tail)
                end_rid = (bibr.attrib["rid"])
                # print(end_rid)
                end_cite = Cite.query.filter_by(citer=citer, local_reference_id=end_rid).first()
                start_rid_sequence = int(former_bibr_seq)
                if end_cite is not None:
                    end_cite_sequence = end_cite.reference_sequence
                    for i in range(start_rid_sequence + 1, end_cite_sequence + 1):
                        former_bibr.tail = former_bibr.tail + "~" + str(i) + "~,"
                former_bibr.tail = former_bibr.tail
                former_bibr = bibr
                continue
            # Normal citation: map each rid to its reference sequence
            # number and prepend "~n~" markers to the tail text.
            rids = (bibr.attrib["rid"]).split(" ")
            seqs_str = []
            for rid in rids:
                cite = Cite.query.filter_by(citer=citer, local_reference_id=rid).first()
                if cite is not None:
                    seq_str = str(cite.reference_sequence)
                    seqs_str.append(seq_str)
            if len(seqs_str) == 0:
                print("len(seqs) is 0.")
            if bibr.tail is None:
                bibr.tail = ""
            seqs_text = "~{0}~".format("~~".join(seqs_str))
            bibr.tail = seqs_text + bibr.tail
            bibr.tail = bibr.tail
            # print(bibr.tail)
            former_bibr = bibr
            former_bibr_seq = seqs_str[-1]
        del bibr_list
        # the paragraph without outer tag
        para_text = etree.tostring(elem, encoding="unicode")
        para_text = re.sub("<.*?>", "", para_text)
        # eliminate the outer tag
        # para_text = re.sub('<p.*?>|</p>\s*$', "", para_text)
        para = nlp(para_text)
        # To record
        former_sent_str = ""
        for sent in para.sents:
            sent_str = str(sent).strip()
            # sent_str do not start with A-Z
            # (treat it as a continuation of the previous sentence)
            if (re.match(r'^[^A-Z]', sent_str)):
                sent_str = former_sent_str + sent_str
            # print(sent_str)
            # Every "~n~" marker in this sentence yields one Citememe.
            search_obj = re.findall(r"~.*?~", sent_str)
            for so in search_obj:
                # print(so.strip("~"))
                # sent_str = re.sub("~.*?~", "", sent_str)
                reference_sequence = int(so.strip("~"))
                # print(local_reference_sequence)
                citence = re.sub('\s+', ' ', sent_str)
                cite = Cite.query.filter_by(citer=citer, reference_sequence=reference_sequence).first()
                citememe = Citememe(
                    cite=cite,
                    citence=citence
                )
                db.session.add(citememe)
            del search_obj
            former_sent_str = sent_str
        del para
        # Free the element and its preceding siblings to bound memory
        # during the streaming parse.
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
    del context
    db.session.commit()
def obtain_reference(context, nsmap, *arguments):
    """Create Literature/Author rows and Cite links for every <ref>.

    ``arguments`` is (citation_tag, citer): the citation element name
    detected for this file and the citing Literature object.
    References with a pmid that already exists are reused as-is.
    """
    # arguments = citation_tag, citer(literature)
    citation_tag, citer = arguments
    # print(citation_tag)
    # Pre-compiled XPath accessors for the fields of one reference.
    citation_type_xpath = etree.XPath(
        "{0}:{1}".format(article_namespace, citation_tag),
        namespaces=nsmap)
    # To find the ref-id, e.g. B1, B2
    ref_id_xpath = etree.XPath("@id", namespaces=nsmap)
    pmid_xpath = etree.XPath(
        "{0}:{1}/{0}:pub-id[@pub-id-type='pmid']".format(
            article_namespace, citation_tag
        ),
        namespaces=nsmap
    )
    article_title_xpath = etree.XPath(
        "{0}:{1}/{0}:article-title".format(article_namespace,
                                           citation_tag),
        namespaces=nsmap)
    source_title_xpath = etree.XPath(
        "{0}:{1}/{0}:source".format(article_namespace,
                                    citation_tag),
        namespaces=nsmap)
    year_xpath = etree.XPath(
        "{0}:{1}/{0}:year".format(article_namespace,
                                  citation_tag),
        namespaces=nsmap)
    volume_xpath = etree.XPath(
        "{0}:{1}/{0}:volume".format(article_namespace,
                                    citation_tag),
        namespaces=nsmap)
    fpage_xpath = etree.XPath(
        "{0}:{1}/{0}:fpage".format(article_namespace,
                                   citation_tag),
        namespaces=nsmap)
    name_xpath = etree.XPath(
        "{0}:{1}/{0}:person-group[@person-group-type='author']/{0}:name".format
        (article_namespace, citation_tag), namespaces=nsmap
    )
    surname_xpath = etree.XPath(
        "{0}:surname".format(article_namespace), namespaces=nsmap)
    given_names_xpath = etree.XPath(
        "{0}:given-names".format(article_namespace), namespaces=nsmap)
    suffix_xpath = etree.XPath(
        "{0}:suffix".format(article_namespace), namespaces=nsmap)
    # 1-based position of the reference within the ref-list.
    reference_sequence = 0
    for event, elem in context:
        reference_sequence += 1
        reference = Literature()
        ref_id_list = ref_id_xpath(elem)
        if ref_id_list:
            ref_id = ref_id_list[0]
            # print(ref_id)
        else:
            # NOTE(review): ref_id stays unbound here; the Cite
            # construction below would raise NameError -- confirm refs
            # always carry an @id in practice.
            print("ref_id_list is None")
        pmid_list = pmid_xpath(elem)
        if pmid_list:
            # pmid present: reuse an existing Literature row when we
            # have one, otherwise record a stub to be updated later.
            pmid = pmid_list[0].text
            temp_reference = Literature.query.filter_by(pmid=pmid).first()
            if temp_reference:
                print(
                    "Reference already exist. And the reference pmc id is {0}".format(
                        temp_reference.pmid
                    )
                )
                reference = temp_reference
            else:
                reference.pmid = pmid
                reference.updated = False
            # print(pmid)
            cite = Cite(
                citer=citer,
                cited=reference,
                local_reference_id=ref_id,
                reference_sequence=reference_sequence
            )
            db.session.add(cite)
            continue
        # No pmid: build the reference record from the citation fields.
        reference.updated = False
        citation_type_list = citation_type_xpath(elem)
        if citation_type_list:
            citation_type = citation_type_list[0].attrib.values()[0]
            reference.type = citation_type
        article_titles = article_title_xpath(elem)
        if article_titles:
            article_title = article_titles[0].text
            reference.title = article_title
            # print(article_title)
        source_titles = source_title_xpath(elem)
        if source_titles:
            source_title = source_titles[0].text
            reference.source_title = source_title
            # print(source_title)
        years = year_xpath(elem)
        if years:
            year = years[0].text
            reference.pub_year = year
            # print(year)
        volumes = volume_xpath(elem)
        if volumes:
            volume = volumes[0].text
            reference.volume = volume
            # print(volume)
        frontpage_list = fpage_xpath(elem)
        if frontpage_list:
            frontpage = frontpage_list[0].text
            reference.fpage = frontpage
            # print(frontpage)
        # Authors, in citation order; a <suffix> substitutes for
        # missing given names (with dots stripped).
        name_list = name_xpath(elem)
        author_rank = 0
        for name in name_list:
            author_rank += 1
            surname_list = surname_xpath(name)
            if surname_list:
                surname = surname_list[0].text
                # print(surname)
            given_names_list = given_names_xpath(name)
            if given_names_list:
                given_names = given_names_list[0].text
            else:
                given_names = suffix_xpath(name)[0].text.replace(".", "")
            author = Author(surname=surname, given_names=given_names)
            author_literature = AuthorLiterature(
                author=author, literature=reference, author_rank=author_rank
            )
            db.session.add(author_literature)
        del name_list
        cite = Cite(
            citer=citer,
            cited=reference,
            local_reference_id=ref_id,
            reference_sequence=reference_sequence
        )
        db.session.add(cite)
        # Free processed elements to bound memory during streaming.
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
    del context
    db.session.commit()
# To obtain the authors' info
def obtain_author(context, nsmap, *arguments):
    """Parse ``<contrib contrib-type="author">`` elements and persist authors.

    Iterates an lxml iterparse ``context``, building an ``Author`` for every
    contributor of type "author", linking it to ``literature`` through an
    ``AuthorLiterature`` row (carrying the author rank and the
    corresponding-author flag) and to its affiliations through
    ``AuthorAffiliation`` rows.  The database session is committed once at
    the end.

    Args:
        context: iterable of (event, element) pairs from ``etree.iterparse``.
        nsmap: namespace prefix -> URI mapping used to compile the XPath
            expressions.
        *arguments: exactly two positional values, in order:
            affiliations_pair (dict): maps an affiliation "rid" (or the
                special key "corresp") to an affiliation object.
            literature: the literature record the parsed authors belong to.
    """
    affiliations_pair, literature = arguments
    # XPath helpers are compiled once and reused for every <contrib> element.
    # Confirms whether the contributor type is "author".
    contrib_type_xpath = etree.XPath("@contrib-type", namespaces=nsmap)
    # Confirms whether the author is the corresponding author.
    corresp_contrib_attrib_xpath = etree.XPath("@corresp", namespaces=nsmap)
    # Finds <xref ref-type="corresp">.
    corresp_xref_xpath = etree.XPath(
        "{0}:xref[@ref-type='corresp']".format(article_namespace),
        namespaces=nsmap)
    # Finds <xref ref-type="aff">.
    aff_xref_xpath = etree.XPath(
        "{0}:xref[@ref-type='aff']".format(article_namespace),
        namespaces=nsmap)
    # The surname.
    surname_xpath = etree.XPath(
        "{0}:name/{0}:surname".format(article_namespace), namespaces=nsmap)
    # The given names.
    given_names_xpath = etree.XPath(
        "{0}:name/{0}:given-names".format(article_namespace), namespaces=nsmap)
    # The email.
    email_xpath = etree.XPath(
        "{0}:email".format(article_namespace), namespaces=nsmap)
    # The email in <author-notes>, used as a fallback for corresponding
    # authors whose <contrib> carries no <email> of its own.
    email_in_author_notes_xpath = etree.XPath(
        "//{0}:author-notes/{0}:corresp/{0}:email".format(article_namespace),
        namespaces=nsmap)
    # The children nodes.
    children_nodes_xpath = etree.XPath(
        "./{0}:*".format(article_namespace), namespaces=nsmap)
    author_rank = 0
    for event, elem in context:
        if contrib_type_xpath(elem)[0] != "author":
            continue
        author = Author()
        author_rank += 1
        corresponding = False
        # The email, when given directly on the <contrib> element.
        temp_emails = email_xpath(elem)
        email = temp_emails[0].text if temp_emails else None
        # An author is "corresponding" when the contrib element carries
        # corresp="yes" or contains an <xref ref-type="corresp">.
        corresp_attrib = corresp_contrib_attrib_xpath(elem)
        has_corresp_attrib = bool(
            corresp_attrib and corresp_attrib[0] == "yes")
        corresp_xrefs = corresp_xref_xpath(elem)
        if has_corresp_attrib or corresp_xrefs:
            corresponding = True
            if corresp_xrefs and email is None:
                # Fall back to the email given in <author-notes>.
                temp_emails = email_in_author_notes_xpath(elem)
                email = temp_emails[0].text if temp_emails else None
        author_with_email = None
        if email:
            # Reuse an existing author record carrying the same email.
            author_with_email = Author.query.filter_by(email=email).first()
        else:
            # Without an email the author cannot be treated as corresponding.
            corresponding = False
        if author_with_email:
            # Known author: only record the authorship link and move on.
            author = author_with_email
            author_literature = AuthorLiterature(
                author=author,
                literature=literature,
                author_rank=author_rank,
                corresponding=corresponding)
            db.session.add(author_literature)
            continue
        author.email = email
        # The surname.
        temp_surnames = surname_xpath(elem)
        author.surname = temp_surnames[0].text if temp_surnames else None
        # The given names.
        temp_given_names = given_names_xpath(elem)
        author.given_names = (
            temp_given_names[0].text if temp_given_names else None)
        # The affiliations referenced via <xref ref-type="aff" rid="...">.
        temp_affilitions = aff_xref_xpath(elem)
        if temp_affilitions:
            for temp_affilition in temp_affilitions:
                rid = temp_affilition.attrib["rid"]
                if (temp_affilition.text is not None and
                        temp_affilition.text.startswith("\n")):
                    # The xref has no direct text; descend to its first child.
                    temp_affilition = children_nodes_xpath(temp_affilition)[0]
                affiliation = affiliations_pair[rid]
                author_affiliation = AuthorAffiliation(
                    author=author, affiliation=affiliation
                )
                db.session.add(author_affiliation)
            del temp_affilitions
        if corresp_xref_xpath(elem):
            # Add the affiliation reserved for the corresponding author.
            if "corresp" in affiliations_pair:
                affiliation = affiliations_pair["corresp"]
                author_affiliation = AuthorAffiliation(
                    author=author, affiliation=affiliation)
                db.session.add(author_affiliation)
        author_literature = AuthorLiterature(
            author=author,
            literature=literature,
            author_rank=author_rank,
            corresponding=corresponding)
        db.session.add(author_literature)
        # Free the parsed element to keep iterparse memory usage bounded.
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
    del context
    db.session.commit()
# NOTE(review): the triple-quoted block below is disabled "web version"
# code kept for reference only; the string is never assigned or executed.
'''
# web version
def obtain_data_webfile(pmc_uid):
    # later use the configeration file to store the prefix and postfix of query
    # query = "https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?
    # verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:" \
    # + str(pmc_uid) \
    # + "&metadataPrefix=pmc"
    # page = requests.get(query)
    query = "https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?
verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:" \
    + str(pmc_uid) \
    + "&metadataPrefix=pmc"
    page = requests.get(query)
    tree = etree.fromstring(page.content)
    # print(tree.xpath("//" + article_namespace + ":title/text()",
    # namespaces=ns))
    front = tree.xpath("//" + article_namespace + ":article/front",
    namespaces=nsmap)
    print(len(front))
'''
| mit |
Onager/plaso | plaso/engine/engine.py | 1 | 15506 | # -*- coding: utf-8 -*-
"""The processing engine."""
import os
from artifacts import errors as artifacts_errors
from artifacts import reader as artifacts_reader
from artifacts import registry as artifacts_registry
from dfvfs.helpers import file_system_searcher
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.containers import sessions
from plaso.engine import artifact_filters
from plaso.engine import filter_file
from plaso.engine import knowledge_base
from plaso.engine import logger
from plaso.engine import path_filters
from plaso.engine import processing_status
from plaso.engine import profilers
from plaso.engine import yaml_filter_file
from plaso.lib import definitions
from plaso.lib import errors
from plaso.preprocessors import manager as preprocess_manager
class BaseEngine(object):
"""Processing engine interface.
Attributes:
collection_filters_helper (CollectionFiltersHelper): collection filters
helper.
knowledge_base (KnowledgeBase): knowledge base.
"""
# The interval of status updates in number of seconds.
_STATUS_UPDATE_INTERVAL = 0.5
_WINDOWS_REGISTRY_FILES_ARTIFACT_NAMES = [
'WindowsSystemRegistryFiles', 'WindowsUserRegistryFiles']
  def __init__(self):
    """Initializes an engine."""
    super(BaseEngine, self).__init__()
    self._abort = False
    # Profiler attributes stay None until enabled via _StartProfiling.
    self._analyzers_profiler = None
    self._memory_profiler = None
    self._name = 'Main'
    self._processing_status = processing_status.ProcessingStatus()
    self._processing_profiler = None
    self._serializers_profiler = None
    self._storage_profiler = None
    self._task_queue_profiler = None
    # Set by BuildCollectionFilters when artifact or path filters are used.
    self.collection_filters_helper = None
    self.knowledge_base = knowledge_base.KnowledgeBase()
  def _DetermineOperatingSystem(self, searcher):
    """Tries to determine the underlying operating system.

    Args:
      searcher (dfvfs.FileSystemSearcher): file system searcher.

    Returns:
      str: operating system for example "Windows". This should be one of
          the values in definitions.OPERATING_SYSTEM_FAMILIES.
    """
    # Probe for well-known directories that identify an operating system
    # family; the searches are case insensitive.
    find_specs = [
        file_system_searcher.FindSpec(
            case_sensitive=False, location='/etc',
            location_separator='/'),
        file_system_searcher.FindSpec(
            case_sensitive=False, location='/System/Library',
            location_separator='/'),
        file_system_searcher.FindSpec(
            case_sensitive=False, location='\\Windows\\System32',
            location_separator='\\'),
        file_system_searcher.FindSpec(
            case_sensitive=False, location='\\WINNT\\System32',
            location_separator='\\'),
        file_system_searcher.FindSpec(
            case_sensitive=False, location='\\WINNT35\\System32',
            location_separator='\\'),
        file_system_searcher.FindSpec(
            case_sensitive=False, location='\\WTSRV\\System32',
            location_separator='\\')]

    locations = []
    for path_spec in searcher.Find(find_specs=find_specs):
      relative_path = searcher.GetRelativePath(path_spec)
      if relative_path:
        locations.append(relative_path.lower())

    # We need to check for both forward and backward slashes since the path
    # spec will be OS dependent, as in running the tool on Windows will return
    # Windows paths (backward slash) vs. forward slash on *NIX systems.
    windows_locations = set([
        '/windows/system32', '\\windows\\system32', '/winnt/system32',
        '\\winnt\\system32', '/winnt35/system32', '\\winnt35\\system32',
        '\\wtsrv\\system32', '/wtsrv/system32'])

    operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN
    # Windows is checked first since a Windows image mounted on *NIX could
    # otherwise also match the '/etc' heuristic below.
    if windows_locations.intersection(set(locations)):
      operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT

    elif '/system/library' in locations:
      operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS

    elif '/etc' in locations:
      operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX

    return operating_system
  def _StartProfiling(self, configuration):
    """Starts profiling.

    Args:
      configuration (ProfilingConfiguration): profiling configuration.
    """
    if not configuration:
      return

    # Each profiler is only created and started when the corresponding
    # profile type is enabled in the configuration; otherwise the attribute
    # keeps the None it was given in __init__.
    if configuration.HaveProfileMemory():
      self._memory_profiler = profilers.MemoryProfiler(
          self._name, configuration)
      self._memory_profiler.Start()

    if configuration.HaveProfileAnalyzers():
      identifier = '{0:s}-analyzers'.format(self._name)
      self._analyzers_profiler = profilers.AnalyzersProfiler(
          identifier, configuration)
      self._analyzers_profiler.Start()

    if configuration.HaveProfileProcessing():
      identifier = '{0:s}-processing'.format(self._name)
      self._processing_profiler = profilers.ProcessingProfiler(
          identifier, configuration)
      self._processing_profiler.Start()

    if configuration.HaveProfileSerializers():
      identifier = '{0:s}-serializers'.format(self._name)
      self._serializers_profiler = profilers.SerializersProfiler(
          identifier, configuration)
      self._serializers_profiler.Start()

    if configuration.HaveProfileStorage():
      self._storage_profiler = profilers.StorageProfiler(
          self._name, configuration)
      self._storage_profiler.Start()

    if configuration.HaveProfileTaskQueue():
      self._task_queue_profiler = profilers.TaskQueueProfiler(
          self._name, configuration)
      self._task_queue_profiler.Start()
def _StopProfiling(self):
"""Stops profiling."""
if self._memory_profiler:
self._memory_profiler.Stop()
self._memory_profiler = None
if self._analyzers_profiler:
self._analyzers_profiler.Stop()
self._analyzers_profiler = None
if self._processing_profiler:
self._processing_profiler.Stop()
self._processing_profiler = None
if self._serializers_profiler:
self._serializers_profiler.Stop()
self._serializers_profiler = None
if self._storage_profiler:
self._storage_profiler.Stop()
self._storage_profiler = None
if self._task_queue_profiler:
self._task_queue_profiler.Stop()
self._task_queue_profiler = None
@classmethod
def CreateSession(
cls, artifact_filter_names=None, command_line_arguments=None,
debug_mode=False, filter_file_path=None, preferred_encoding='utf-8',
preferred_time_zone=None, preferred_year=None, text_prepend=None):
"""Creates a session attribute container.
Args:
artifact_filter_names (Optional[list[str]]): names of artifact definitions
that are used for filtering file system and Windows Registry
key paths.
command_line_arguments (Optional[str]): the command line arguments.
debug_mode (bool): True if debug mode was enabled.
filter_file_path (Optional[str]): path to a file with find specifications.
preferred_encoding (Optional[str]): preferred encoding.
preferred_time_zone (Optional[str]): preferred time zone.
preferred_year (Optional[int]): preferred year.
text_prepend (Optional[str]): text to prepend to every display name.
Returns:
Session: session attribute container.
"""
session = sessions.Session()
session.artifact_filters = artifact_filter_names
session.command_line_arguments = command_line_arguments
session.debug_mode = debug_mode
session.filter_file = filter_file_path
session.preferred_encoding = preferred_encoding
session.preferred_time_zone = preferred_time_zone
session.preferred_year = preferred_year
session.text_prepend = text_prepend
return session
  def GetSourceFileSystem(self, source_path_spec, resolver_context=None):
    """Retrieves the file system of the source.

    Args:
      source_path_spec (dfvfs.PathSpec): path specifications of the sources
          to process.
      resolver_context (dfvfs.Context): resolver context.

    Returns:
      tuple: containing:

        dfvfs.FileSystem: file system
        path.PathSpec: mount point path specification. The mount point path
            specification refers to either a directory or a volume on a
            storage media device or image. It is needed by the dfVFS file
            system searcher (FileSystemSearcher) to indicate the base
            location of the file system.

    Raises:
      RuntimeError: if source file system path specification is not set.
    """
    if not source_path_spec:
      raise RuntimeError('Missing source path specification.')

    file_system = path_spec_resolver.Resolver.OpenFileSystem(
        source_path_spec, resolver_context=resolver_context)

    type_indicator = source_path_spec.type_indicator
    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(type_indicator):
      # A system level path specification is itself the mount point.
      mount_point = source_path_spec
    else:
      mount_point = source_path_spec.parent

    return file_system, mount_point
  def PreprocessSources(
      self, artifacts_registry_object, source_path_specs,
      resolver_context=None):
    """Preprocesses the sources.

    Args:
      artifacts_registry_object (artifacts.ArtifactDefinitionsRegistry):
          artifact definitions registry.
      source_path_specs (list[dfvfs.PathSpec]): path specifications of
          the sources to process.
      resolver_context (Optional[dfvfs.Context]): resolver context.
    """
    detected_operating_systems = []
    for source_path_spec in source_path_specs:
      try:
        file_system, mount_point = self.GetSourceFileSystem(
            source_path_spec, resolver_context=resolver_context)
      except (RuntimeError, dfvfs_errors.BackEndError) as exception:
        # A source that cannot be opened is logged and skipped; the
        # remaining sources are still preprocessed.
        logger.error(exception)
        continue

      try:
        searcher = file_system_searcher.FileSystemSearcher(
            file_system, mount_point)

        operating_system = self._DetermineOperatingSystem(searcher)
        if operating_system != definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN:
          # Preprocessing plugins are only run when the operating system
          # family could be determined.
          preprocess_manager.PreprocessPluginsManager.RunPlugins(
              artifacts_registry_object, file_system, mount_point,
              self.knowledge_base)

          detected_operating_systems.append(operating_system)

      finally:
        file_system.Close()

    if detected_operating_systems:
      logger.info('Preprocessing detected operating systems: {0:s}'.format(
          ', '.join(detected_operating_systems)))
      # The first detected operating system is stored in the knowledge base.
      self.knowledge_base.SetValue(
          'operating_system', detected_operating_systems[0])
  def BuildCollectionFilters(
      self, artifact_definitions_path, custom_artifacts_path,
      knowledge_base_object, artifact_filter_names=None, filter_file_path=None):
    """Builds collection filters from artifacts or filter file if available.

    Artifact filter names take precedence over a filter file when both are
    provided. The resulting helper is stored in collection_filters_helper.

    Args:
      artifact_definitions_path (str): path to artifact definitions file.
      custom_artifacts_path (str): path to custom artifact definitions file.
      knowledge_base_object (KnowledgeBase): knowledge base.
      artifact_filter_names (Optional[list[str]]): names of artifact
          definitions that are used for filtering file system and Windows
          Registry key paths.
      filter_file_path (Optional[str]): path of filter file.

    Raises:
      InvalidFilter: if no valid file system find specifications are built.
    """
    environment_variables = knowledge_base_object.GetEnvironmentVariables()

    if artifact_filter_names:
      logger.debug(
          'building find specification based on artifacts: {0:s}'.format(
              ', '.join(artifact_filter_names)))

      artifacts_registry_object = BaseEngine.BuildArtifactsRegistry(
          artifact_definitions_path, custom_artifacts_path)
      self.collection_filters_helper = (
          artifact_filters.ArtifactDefinitionsFiltersHelper(
              artifacts_registry_object, knowledge_base_object))
      self.collection_filters_helper.BuildFindSpecs(
          artifact_filter_names, environment_variables=environment_variables)

      # If the user selected Windows Registry artifacts we have to ensure
      # the Windows Registry files are parsed.
      if self.collection_filters_helper.registry_find_specs:
        self.collection_filters_helper.BuildFindSpecs(
            self._WINDOWS_REGISTRY_FILES_ARTIFACT_NAMES,
            environment_variables=environment_variables)

      if not self.collection_filters_helper.included_file_system_find_specs:
        raise errors.InvalidFilter(
            'No valid file system find specifications were built from '
            'artifacts.')

    elif filter_file_path:
      logger.debug(
          'building find specification based on filter file: {0:s}'.format(
              filter_file_path))

      # The filter file format is selected by extension: YAML for .yaml/.yml,
      # the legacy text format otherwise.
      filter_file_path_lower = filter_file_path.lower()
      if (filter_file_path_lower.endswith('.yaml') or
          filter_file_path_lower.endswith('.yml')):
        filter_file_object = yaml_filter_file.YAMLFilterFile()
      else:
        filter_file_object = filter_file.FilterFile()

      filter_file_path_filters = filter_file_object.ReadFromFile(
          filter_file_path)
      self.collection_filters_helper = (
          path_filters.PathCollectionFiltersHelper())
      self.collection_filters_helper.BuildFindSpecs(
          filter_file_path_filters, environment_variables=environment_variables)

      if (not self.collection_filters_helper.excluded_file_system_find_specs and
          not self.collection_filters_helper.included_file_system_find_specs):
        raise errors.InvalidFilter((
            'No valid file system find specifications were built from filter '
            'file: {0:s}.').format(filter_file_path))
@classmethod
def BuildArtifactsRegistry(
cls, artifact_definitions_path, custom_artifacts_path):
"""Build Find Specs from artifacts or filter file if available.
Args:
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
Returns:
artifacts.ArtifactDefinitionsRegistry: artifact definitions registry.
Raises:
BadConfigOption: if artifact definitions cannot be read.
"""
if artifact_definitions_path and not os.path.isdir(
artifact_definitions_path):
raise errors.BadConfigOption(
'No such artifacts filter file: {0:s}.'.format(
artifact_definitions_path))
if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):
raise errors.BadConfigOption(
'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))
registry = artifacts_registry.ArtifactDefinitionsRegistry()
reader = artifacts_reader.YamlArtifactsReader()
try:
registry.ReadFromDirectory(reader, artifact_definitions_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(artifact_definitions_path, exception))
if custom_artifacts_path:
try:
registry.ReadFromFile(reader, custom_artifacts_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(custom_artifacts_path, exception))
return registry
| apache-2.0 |
openstack/octavia | octavia/db/migration/alembic_migrations/versions/4fe8240425b4_update_vip_add_subnet_id.py | 1 | 1075 | # Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""update vip add subnet id
Revision ID: 4fe8240425b4
Revises: 48660b6643f0
Create Date: 2015-07-01 14:27:44.187179
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4fe8240425b4'
down_revision = '48660b6643f0'
def upgrade():
    """Alembic upgrade: rename the vip table's network_id column to subnet_id."""
    with op.batch_alter_table(u'vip') as batch_op:
        batch_op.alter_column(u'network_id', new_column_name=u'subnet_id',
                              existing_type=sa.String(36))
| apache-2.0 |
ttsubo/ryu | ryu/lib/packet/igmp.py | 19 | 19270 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internet Group Management Protocol(IGMP) packet parser/serializer
RFC 1112
IGMP v1 format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| Type | Unused | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
RFC 2236
IGMP v2 format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Max Resp Time | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
RFC 3376
IGMP v3 Membership Query format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type = 0x11 | Max Resp Code | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Resv |S| QRV | QQIC | Number of Sources (N) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address [1] |
+- -+
| Source Address [2] |
+- . -+
. . .
. . .
+- -+
| Source Address [N] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
IGMP v3 Membership Report format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type = 0x22 | Reserved | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved | Number of Group Records (M) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [1] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [2] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| . |
. . .
| . |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [M] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
where each Group Record has the following internal format:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Record Type | Aux Data Len | Number of Sources (N) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Multicast Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address [1] |
+- -+
| Source Address [2] |
+- -+
. . .
. . .
. . .
+- -+
| Source Address [N] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Auxiliary Data .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
import six
import struct
from math import trunc
from ryu.lib import addrconv
from ryu.lib import stringify
from ryu.lib.packet import packet_base
from ryu.lib.packet import packet_utils
# IGMP message types (RFC 1112 / RFC 2236 / RFC 3376).
IGMP_TYPE_QUERY = 0x11
IGMP_TYPE_REPORT_V1 = 0x12
IGMP_TYPE_REPORT_V2 = 0x16
IGMP_TYPE_LEAVE = 0x17
IGMP_TYPE_REPORT_V3 = 0x22

# Default protocol timer values, in seconds.
QUERY_RESPONSE_INTERVAL = 10.0
LAST_MEMBER_QUERY_INTERVAL = 1.0

# The all-hosts multicast group address and its corresponding MAC address.
MULTICAST_IP_ALL_HOST = '224.0.0.1'
MULTICAST_MAC_ALL_HOST = '01:00:5e:00:00:01'

# for types of IGMPv3 Report Group Records
MODE_IS_INCLUDE = 1
MODE_IS_EXCLUDE = 2
CHANGE_TO_INCLUDE_MODE = 3
CHANGE_TO_EXCLUDE_MODE = 4
ALLOW_NEW_SOURCES = 5
BLOCK_OLD_SOURCES = 6
class igmp(packet_base.PacketBase):
    """
    Internet Group Management Protocol(IGMP, RFC 1112, RFC 2236)
    header encoder/decoder class.

    http://www.ietf.org/rfc/rfc1112.txt

    http://www.ietf.org/rfc/rfc2236.txt

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte
    order.
    __init__ takes the corresponding args in this order.

    =============== ====================================================
    Attribute       Description
    =============== ====================================================
    msgtype         a message type for v2, or a combination of
                    version and a message type for v1.
    maxresp         max response time in unit of 1/10 second. it is
                    meaningful only in Query Message.
    csum            a check sum value. 0 means automatically-calculate
                    when encoding.
    address         a group address value.
    =============== ====================================================
    """
    # On-wire layout: type (1 byte), max resp (1), checksum (2), group (4).
    _PACK_STR = '!BBH4s'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    _TYPE = {
        'ascii': [
            'address'
        ]
    }

    def __init__(self, msgtype=IGMP_TYPE_QUERY, maxresp=0, csum=0,
                 address='0.0.0.0'):
        super(igmp, self).__init__()
        self.msgtype = msgtype
        self.maxresp = maxresp
        self.csum = csum
        self.address = address

    @classmethod
    def parser(cls, buf):
        """Parse buf and dispatch to the v3 subclasses when applicable."""
        assert cls._MIN_LEN <= len(buf)
        (msgtype, ) = struct.unpack_from('!B', buf)

        if (IGMP_TYPE_QUERY == msgtype and
                igmpv3_query.MIN_LEN <= len(buf)):
            # A query long enough to carry the v3 specific fields is
            # parsed as an IGMPv3 Membership Query.
            (instance, subclass, rest,) = igmpv3_query.parser(buf)
        elif IGMP_TYPE_REPORT_V3 == msgtype:
            (instance, subclass, rest,) = igmpv3_report.parser(buf)
        else:
            # v1/v2 message: fixed 8 byte header.
            (msgtype, maxresp, csum, address
             ) = struct.unpack_from(cls._PACK_STR, buf)
            instance = cls(msgtype, maxresp, csum,
                           addrconv.ipv4.bin_to_text(address))
            subclass = None
            rest = buf[cls._MIN_LEN:]

        return instance, subclass, rest

    def serialize(self, payload, prev):
        hdr = bytearray(struct.pack(self._PACK_STR, self.msgtype,
                                    trunc(self.maxresp), self.csum,
                                    addrconv.ipv4.text_to_bin(self.address)))

        if self.csum == 0:
            # The checksum is computed over the header with the checksum
            # field still zero, then written back in place.
            self.csum = packet_utils.checksum(hdr)
            struct.pack_into('!H', hdr, 2, self.csum)

        return hdr
class igmpv3_query(igmp):
    """
    Internet Group Management Protocol(IGMP, RFC 3376)
    Membership Query message encoder/decoder class.

    http://www.ietf.org/rfc/rfc3376.txt

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte
    order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    =============== ====================================================
    Attribute       Description
    =============== ====================================================
    msgtype         a message type for v3.
    maxresp         max response time in unit of 1/10 second.
    csum            a check sum value. 0 means automatically-calculate
                    when encoding.
    address         a group address value.
    s_flg           when set to 1, routers suppress the timer process.
    qrv             robustness variable for a querier.
    qqic            an interval time for a querier in unit of seconds.
    num             a number of the multicast servers.
    srcs            a list of IPv4 addresses of the multicast servers.
    =============== ====================================================
    """
    # Fixed part layout: type, max resp code, checksum, group address,
    # Resv/S/QRV byte, QQIC, number of sources.
    _PACK_STR = '!BBH4sBBH'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    MIN_LEN = _MIN_LEN
    _TYPE = {
        'ascii': [
            'address'
        ],
        'asciilist': [
            'srcs'
        ]
    }

    def __init__(self, msgtype=IGMP_TYPE_QUERY, maxresp=100, csum=0,
                 address='0.0.0.0', s_flg=0, qrv=2, qqic=0, num=0,
                 srcs=None):
        super(igmpv3_query, self).__init__(
            msgtype, maxresp, csum, address)
        self.s_flg = s_flg
        self.qrv = qrv
        self.qqic = qqic
        self.num = num
        srcs = srcs or []
        assert isinstance(srcs, list)
        for src in srcs:
            assert isinstance(src, str)
        self.srcs = srcs

    @classmethod
    def parser(cls, buf):
        (msgtype, maxresp, csum, address, s_qrv, qqic, num
         ) = struct.unpack_from(cls._PACK_STR, buf)
        # The S flag and QRV share one byte: Resv(4)|S(1)|QRV(3).
        s_flg = (s_qrv >> 3) & 0b1
        qrv = s_qrv & 0b111
        offset = cls._MIN_LEN
        srcs = []
        while 0 < len(buf[offset:]) and num > len(srcs):
            assert 4 <= len(buf[offset:])
            (src, ) = struct.unpack_from('4s', buf, offset)
            srcs.append(addrconv.ipv4.bin_to_text(src))
            offset += 4
        assert num == len(srcs)
        return (cls(msgtype, maxresp, csum,
                    addrconv.ipv4.bin_to_text(address), s_flg, qrv,
                    qqic, num, srcs),
                None,
                buf[offset:])

    def serialize(self, payload, prev):
        # Re-pack the S flag and QRV into their shared byte.
        s_qrv = self.s_flg << 3 | self.qrv
        buf = bytearray(struct.pack(self._PACK_STR, self.msgtype,
                                    trunc(self.maxresp), self.csum,
                                    addrconv.ipv4.text_to_bin(self.address),
                                    s_qrv, trunc(self.qqic), self.num))
        for src in self.srcs:
            buf.extend(struct.pack('4s', addrconv.ipv4.text_to_bin(src)))
        if 0 == self.num:
            # Fill in the source count (offset 10) when left at 0.
            self.num = len(self.srcs)
            struct.pack_into('!H', buf, 10, self.num)
        if 0 == self.csum:
            # Checksum is computed over the whole message and written back.
            self.csum = packet_utils.checksum(buf)
            struct.pack_into('!H', buf, 2, self.csum)
        return six.binary_type(buf)

    def __len__(self):
        # Fixed header plus 4 bytes per source address.
        return self._MIN_LEN + len(self.srcs) * 4
class igmpv3_report(igmp):
    """
    Internet Group Management Protocol(IGMP, RFC 3376)
    Membership Report message encoder/decoder class.

    http://www.ietf.org/rfc/rfc3376.txt

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte
    order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    =============== ====================================================
    Attribute       Description
    =============== ====================================================
    msgtype         a message type for v3.
    csum            a check sum value. 0 means automatically-calculate
                    when encoding.
    record_num      a number of the group records.
    records         a list of ryu.lib.packet.igmp.igmpv3_report_group.
                    None if no records.
    =============== ====================================================
    """
    # Layout: type, reserved(1), checksum, reserved(2), record count.
    _PACK_STR = '!BxH2xH'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    _class_prefixes = ['igmpv3_report_group']

    def __init__(self, msgtype=IGMP_TYPE_REPORT_V3, csum=0, record_num=0,
                 records=None):
        # NOTE(review): unlike igmp.__init__ this does not call
        # super().__init__() and has no maxresp/address fields — confirm
        # this is intentional for the v3 report format.
        self.msgtype = msgtype
        self.csum = csum
        self.record_num = record_num
        records = records or []
        assert isinstance(records, list)
        for record in records:
            assert isinstance(record, igmpv3_report_group)
        self.records = records

    @classmethod
    def parser(cls, buf):
        (msgtype, csum, record_num
         ) = struct.unpack_from(cls._PACK_STR, buf)
        offset = cls._MIN_LEN
        records = []
        # Each group record is variable length; consume them sequentially.
        while 0 < len(buf[offset:]) and record_num > len(records):
            record = igmpv3_report_group.parser(buf[offset:])
            records.append(record)
            offset += len(record)
        assert record_num == len(records)
        return (cls(msgtype, csum, record_num, records),
                None,
                buf[offset:])

    def serialize(self, payload, prev):
        buf = bytearray(struct.pack(self._PACK_STR, self.msgtype,
                                    self.csum, self.record_num))
        for record in self.records:
            buf.extend(record.serialize())
        if 0 == self.record_num:
            # Fill in the record count (offset 6) when left at 0.
            self.record_num = len(self.records)
            struct.pack_into('!H', buf, 6, self.record_num)
        if 0 == self.csum:
            # Checksum is computed over the whole message and written back.
            self.csum = packet_utils.checksum(buf)
            struct.pack_into('!H', buf, 2, self.csum)
        return six.binary_type(buf)

    def __len__(self):
        records_len = 0
        for record in self.records:
            records_len += len(record)
        return self._MIN_LEN + records_len
class igmpv3_report_group(stringify.StringifyMixin):
    r"""
    Internet Group Management Protocol(IGMP, RFC 3376)
    Membership Report Group Record message encoder/decoder class.

    http://www.ietf.org/rfc/rfc3376.txt

    This is used with ryu.lib.packet.igmp.igmpv3_report.

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte
    order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    =============== ====================================================
    Attribute       Description
    =============== ====================================================
    type\_          a group record type for v3.
    aux_len         the length of the auxiliary data.
    num             a number of the multicast servers.
    address         a group address value.
    srcs            a list of IPv4 addresses of the multicast servers.
    aux             the auxiliary data.
    =============== ====================================================
    """
    # Layout: record type, aux data len, source count, multicast address.
    _PACK_STR = '!BBH4s'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    _TYPE = {
        'ascii': [
            'address'
        ],
        'asciilist': [
            'srcs'
        ]
    }

    def __init__(self, type_=0, aux_len=0, num=0, address='0.0.0.0',
                 srcs=None, aux=None):
        self.type_ = type_
        self.aux_len = aux_len
        self.num = num
        self.address = address
        srcs = srcs or []
        assert isinstance(srcs, list)
        for src in srcs:
            assert isinstance(src, str)
        self.srcs = srcs
        self.aux = aux

    @classmethod
    def parser(cls, buf):
        (type_, aux_len, num, address
         ) = struct.unpack_from(cls._PACK_STR, buf)
        offset = cls._MIN_LEN
        srcs = []
        while 0 < len(buf[offset:]) and num > len(srcs):
            assert 4 <= len(buf[offset:])
            (src, ) = struct.unpack_from('4s', buf, offset)
            srcs.append(addrconv.ipv4.bin_to_text(src))
            offset += 4
        assert num == len(srcs)
        aux = None
        if aux_len:
            # aux_len is expressed in 32-bit words.
            (aux, ) = struct.unpack_from('%ds' % (aux_len * 4), buf, offset)
        return cls(type_, aux_len, num,
                   addrconv.ipv4.bin_to_text(address), srcs, aux)

    def serialize(self):
        buf = bytearray(struct.pack(self._PACK_STR, self.type_,
                                    self.aux_len, self.num,
                                    addrconv.ipv4.text_to_bin(self.address)))
        for src in self.srcs:
            buf.extend(struct.pack('4s', addrconv.ipv4.text_to_bin(src)))
        if 0 == self.num:
            # Fill in the source count (offset 2) when left at 0.
            self.num = len(self.srcs)
            struct.pack_into('!H', buf, 2, self.num)
        if self.aux is not None:
            # Pad the auxiliary data to a multiple of 4 bytes.
            mod = len(self.aux) % 4
            if mod:
                self.aux += bytearray(4 - mod)
                self.aux = six.binary_type(self.aux)
            buf.extend(self.aux)
            if 0 == self.aux_len:
                # aux_len field (offset 1) is expressed in 32-bit words.
                self.aux_len = len(self.aux) // 4
                struct.pack_into('!B', buf, 1, self.aux_len)
        return six.binary_type(buf)

    def __len__(self):
        return self._MIN_LEN + len(self.srcs) * 4 + self.aux_len * 4
| apache-2.0 |
spiral-project/daybed | daybed/views/tokens.py | 1 | 1475 | from cornice import Service
from daybed.backends.exceptions import CredentialsAlreadyExist
from daybed.tokens import get_hawk_credentials, hmac_digest
from daybed.views.errors import forbidden_view
# Cornice services: /tokens creates credentials, /token introspects the
# caller's own credentials.
tokens = Service(name='tokens', path='/tokens', description='Tokens')
token = Service(name='token', path='/token', description='Token')
@tokens.post(permission='post_token')
def post_tokens(request):
    """Creates a new token and store it.

    Responds 201 when the credentials were freshly stored, 200 when the
    same derived credentials already existed (idempotent re-post).
    """
    # If we have an authorization header with the Basic or Token realm
    # Use it to derive the key
    session_token = None
    if request.authorization and \
       request.authorization[0] in ["Basic", "Token"]:
        session_token = hmac_digest(request.registry.tokenHmacKey,
                                    "%s %s" % request.authorization[:2])
    token, credentials = get_hawk_credentials(session_token)
    try:
        request.db.store_credentials(token, credentials)
    except CredentialsAlreadyExist:
        # Deterministic derivation: the same session token maps to the
        # same stored credentials, so this is not an error.
        request.response.status = "200 OK"
    else:
        request.response.status = "201 Created"
    return {
        'token': token,
        'credentials': credentials
    }
@token.get()
def get_token(request):
    """Return the caller's stored token plus its derived Hawk credentials,
    or a 403 response when the request carries no credentials id."""
    if not request.credentials_id:
        return forbidden_view(request)
    stored_token = request.db.get_token(request.credentials_id)
    _, credentials = get_hawk_credentials(stored_token)
    return {
        'token': stored_token,
        'credentials': credentials
    }
| bsd-3-clause |
adalke/rdkit | rdkit/Chem/Crippen.py | 1 | 6023 | # $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Atom-based calculation of LogP and MR using Crippen's approach
Reference:
S. A. Wildman and G. M. Crippen *JCICS* _39_ 868-873 (1999)
"""
from __future__ import print_function
import os
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import numpy
# Lazily-populated module-level caches of the Crippen contribution table.
_smartsPatterns = {}
_patternOrder = []
# this is the file containing the atom contributions
defaultPatternFileName = os.path.join(RDConfig.RDDataDir,'Crippen.txt')
def _ReadPatts(fileName):
  """ *Internal Use Only*

  Parses the Crippen atom-contribution pattern list from the data file.

  Each non-comment line is tab-separated:
  ``<atom type>\\t<SMARTS>\\t<logP contribution>\\t<MR contribution>``

  Returns a 2-tuple:
    - order: atom-type labels in file order
    - patts: dict mapping each label to a list of
      (smarts, compiled pattern mol, logP, MR) tuples
  """
  patts = {}
  order = []
  with open(fileName, 'r') as f:
    lines = f.readlines()
  for line in lines:
    # guard against empty lines before indexing line[0]
    if line and line[0] != '#':
      splitLine = line.split('\t')
      if len(splitLine) >= 4 and splitLine[0] != '':
        sma = splitLine[1]
        if sma != 'SMARTS':
          # Strip any quoting around the SMARTS.  str.replace returns a
          # new string; the original code discarded the result, so quoted
          # patterns were passed to MolFromSmarts unmodified.
          sma = sma.replace('"', '')
          p = Chem.MolFromSmarts(sma)
          if p:
            # Atom-type labels are one char, optionally followed by a
            # second char that is not a digit or 'S' (e.g. 'Br', 'Cl').
            if len(splitLine[0]) > 1 and splitLine[0][1] not in 'S0123456789':
              cha = splitLine[0][:2]
            else:
              cha = splitLine[0][0]
            logP = float(splitLine[2])
            if splitLine[3] != '':
              mr = float(splitLine[3])
            else:
              mr = 0.0
            if cha not in order:
              order.append(cha)
            patts.setdefault(cha, []).append((sma, p, logP, mr))
          else:
            print('Problems parsing smarts: %s'%(sma))
  return order, patts
# C++-accelerated implementation; the pure-Python version below is kept
# for reference and debugging.
_GetAtomContribs=rdMolDescriptors._CalcCrippenContribs
def _pyGetAtomContribs(mol,patts=None,order=None,verbose=0,force=0):
  """ *Internal Use Only*
  calculates atomic contributions to the LogP and MR values

  if the argument *force* is not set, we'll use the molecules stored
  _crippenContribs value when possible instead of re-calculating.

  Returns a list with one (logP, MR) tuple per atom; atoms not matched
  by any pattern keep (0., 0.).

  **Note:** Changes here affect the version numbers of MolLogP and MolMR
  as well as the VSA descriptors in Chem.MolSurf
  """
  # Per-molecule memoization: results are cached on the mol object.
  if not force and hasattr(mol,'_crippenContribs'):
    return mol._crippenContribs
  if patts is None:
    patts = _smartsPatterns
    order = _patternOrder
  nAtoms = mol.GetNumAtoms()
  atomContribs = [(0.,0.)]*nAtoms
  doneAtoms=[0]*nAtoms
  nAtomsFound=0
  done = False
  # Patterns are tried in file order; the first pattern whose match starts
  # at an atom wins for that atom (later matches are ignored).
  for cha in order:
    pattVect = patts[cha]
    for sma,patt,logp,mr in pattVect:
      #print('try:',entry[0])
      for match in mol.GetSubstructMatches(patt,False,False):
        firstIdx = match[0]
        if not doneAtoms[firstIdx]:
          doneAtoms[firstIdx]=1
          atomContribs[firstIdx] = (logp,mr)
          if verbose:
            print('\tAtom %d: %s %4.4f %4.4f'%(match[0],sma,logp,mr))
          nAtomsFound+=1
          if nAtomsFound>=nAtoms:
            # every atom assigned; stop scanning further patterns
            done=True
            break
    if done: break
  mol._crippenContribs = atomContribs
  return atomContribs
def _Init():
  """Lazily populate the module-level Crippen pattern caches on first use."""
  global _smartsPatterns, _patternOrder
  if not _smartsPatterns:
    _patternOrder, _smartsPatterns = _ReadPatts(defaultPatternFileName)
def _pyMolLogP(inMol,patts=None,order=None,verbose=0,addHs=1):
  """ DEPRECATED

  Pure-Python Wildman-Crippen LogP; superseded by the C++
  rdMolDescriptors.CalcCrippenDescriptors implementation.
  """
  # addHs != 0 adds hydrogens first; the second AddHs argument differs
  # between the <0 and >0 branches (presumably the explicit-only flag —
  # TODO confirm against the Chem.AddHs signature).
  if addHs < 0:
    mol = Chem.AddHs(inMol,1)
  elif addHs > 0:
    mol = Chem.AddHs(inMol,0)
  else:
    mol = inMol
  if patts is None:
    # fall back to the lazily-loaded module-level pattern tables
    global _smartsPatterns,_patternOrder
    if _smartsPatterns == {}:
      _patternOrder,_smartsPatterns = _ReadPatts(defaultPatternFileName)
    patts = _smartsPatterns
    order = _patternOrder
  atomContribs = _pyGetAtomContribs(mol,patts,order,verbose=verbose)
  # column 0 of the per-atom (logP, MR) pairs is the logP contribution
  return numpy.sum(atomContribs,0)[0]
_pyMolLogP.version="1.1.0"
def _pyMolMR(inMol,patts=None,order=None,verbose=0,addHs=1):
  """ DEPRECATED

  Pure-Python Wildman-Crippen molar refractivity; superseded by the C++
  rdMolDescriptors.CalcCrippenDescriptors implementation.
  """
  # addHs != 0 adds hydrogens first; the second AddHs argument differs
  # between the <0 and >0 branches (presumably the explicit-only flag —
  # TODO confirm against the Chem.AddHs signature).
  if addHs < 0:
    mol = Chem.AddHs(inMol,1)
  elif addHs > 0:
    mol = Chem.AddHs(inMol,0)
  else:
    mol = inMol
  if patts is None:
    # fall back to the lazily-loaded module-level pattern tables
    global _smartsPatterns,_patternOrder
    if _smartsPatterns == {}:
      _patternOrder,_smartsPatterns = _ReadPatts(defaultPatternFileName)
    patts = _smartsPatterns
    order = _patternOrder
  atomContribs = _pyGetAtomContribs(mol,patts,order,verbose=verbose)
  # column 1 of the per-atom (logP, MR) pairs is the MR contribution
  return numpy.sum(atomContribs,0)[1]
_pyMolMR.version="1.1.0"
MolLogP=lambda *x,**y:rdMolDescriptors.CalcCrippenDescriptors(*x,**y)[0]
MolLogP.version=rdMolDescriptors._CalcCrippenDescriptors_version
MolLogP.__doc__=""" Wildman-Crippen LogP value
Uses an atom-based scheme based on the values in the paper:
S. A. Wildman and G. M. Crippen JCICS 39 868-873 (1999)
**Arguments**
- inMol: a molecule
- addHs: (optional) toggles adding of Hs to the molecule for the calculation.
If true, hydrogens will be added to the molecule and used in the calculation.
"""
MolMR=lambda *x,**y:rdMolDescriptors.CalcCrippenDescriptors(*x,**y)[1]
MolMR.version=rdMolDescriptors._CalcCrippenDescriptors_version
MolMR.__doc__=""" Wildman-Crippen MR value
Uses an atom-based scheme based on the values in the paper:
S. A. Wildman and G. M. Crippen JCICS 39 868-873 (1999)
**Arguments**
- inMol: a molecule
- addHs: (optional) toggles adding of Hs to the molecule for the calculation.
If true, hydrogens will be added to the molecule and used in the calculation.
"""
if __name__=='__main__':
  # Command-line demo: compute LogP/MR for each SMILES given as an argument,
  # with and without explicit hydrogens.  Pass -v for per-atom details.
  import sys
  # sys.argv always contains at least the script name, so the original
  # ``if len(sys.argv):`` was always true; require an actual argument.
  if len(sys.argv) > 1:
    ms = []
    verbose=0
    if '-v' in sys.argv:
      verbose=1
      sys.argv.remove('-v')
    for smi in sys.argv[1:]:
      ms.append((smi,Chem.MolFromSmiles(smi)))
    for smi,m in ms:
      print('Mol: %s'%(smi))
      logp = MolLogP(m,verbose=verbose)
      print('----')
      mr = MolMR(m,verbose=verbose)
      print('Res:',logp,mr)
      newM = Chem.AddHs(m)
      logp = MolLogP(newM,addHs=0)
      mr = MolMR(newM,addHs=0)
      print('\t',logp,mr)
      print('-*-*-*-*-*-*-*-*')
| bsd-3-clause |
google-research/google-research | social_rl/multiagent_tfagents/multiagent_gym_env.py | 1 | 4230 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper providing a multiagent adapter for Gym environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tf_agents.environments import gym_wrapper
from tf_agents.specs import array_spec
from tf_agents.specs.tensor_spec import BoundedTensorSpec
from tf_agents.trajectories import time_step as ts_lib
from tf_agents.utils import nest_utils
class MultiagentGymWrapper(gym_wrapper.GymWrapper):
  """Wrapper implementing PyEnvironment interface for multiagent gym envs.
  Reward spec is generated based on the number of agents.
  Action and observation specs are automatically generated from the action and
  observation spaces of the underlying environment. The expectation is that the
  first dimension of the environment specs will be the number of agents.
  """
  def __init__(self,
               gym_env,
               n_agents,
               discount=1.0,
               spec_dtype_map=None,
               match_obs_space_dtype=True,
               auto_reset=True,
               simplify_box_bounds=True):
    # n_agents sizes both the reward vector (see reward_spec) and the
    # tuple of per-agent action specs built below; the remaining args are
    # forwarded unchanged to gym_wrapper.GymWrapper.
    self.n_agents = n_agents
    super(MultiagentGymWrapper, self).__init__(
        gym_env, discount, spec_dtype_map, match_obs_space_dtype, auto_reset,
        simplify_box_bounds)
    # Create a single-agent version of the action spec and then tile it to
    # comply with tf-agents spec requirements.
    single_action_spec = BoundedTensorSpec(
        shape=(), dtype=self._action_spec.dtype, name=self._action_spec.name,
        minimum=self._action_spec.minimum, maximum=self._action_spec.maximum)
    self._action_spec = (single_action_spec,) * n_agents
  def reward_spec(self):
    """Defines a vector reward based on the number of agents.
    Returns:
      An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.
    """
    # In minigrid (single-agent) mode the reward stays scalar; otherwise
    # one float32 entry per agent.
    if self._gym_env.minigrid_mode:
      return array_spec.ArraySpec(shape=(), dtype=np.float32, name='reward')
    else:
      return array_spec.ArraySpec(shape=(self.n_agents,), dtype=np.float32,
                                  name='reward')
  def _reset(self):
    """Resets the wrapped env and emits a restart step using this
    wrapper's (possibly vector-valued) reward spec."""
    observation = self._gym_env.reset()
    self._info = None
    self._done = False
    if self._match_obs_space_dtype:
      observation = self._to_obs_space_dtype(observation)
    reset_step = ts_lib.restart(observation, reward_spec=self.reward_spec())
    return reset_step
  def _step(self, action):
    """Steps the wrapped env; the reward is cast to the spec dtype and
    shaped so nest_utils can recover its outer (batch) dims."""
    # Automatically reset the environments on step if they need to be reset.
    if self._handle_auto_reset and self._done:
      return self.reset()
    # Some environments (e.g. FrozenLake) use the action as a key to the
    # transition probability so it has to be hashable. In the case of discrete
    # actions we have a numpy scalar (e.g array(2)) which is not hashable
    # in this case, we simply pull out the scalar value which will be hashable.
    try:
      action = action.item() if self._action_is_discrete else action
    except AttributeError:
      action = action[0]  # Remove ListWrapper for single-agent compatibility
    observation, reward, self._done, self._info = self._gym_env.step(action)
    if self._match_obs_space_dtype:
      observation = self._to_obs_space_dtype(observation)
    reward = np.asarray(reward, dtype=self.reward_spec().dtype)
    outer_dims = nest_utils.get_outer_array_shape(reward, self.reward_spec())
    if self._done:
      return ts_lib.termination(observation, reward, outer_dims=outer_dims)
    else:
      return ts_lib.transition(observation, reward, self._discount,
                               outer_dims=outer_dims)
| apache-2.0 |
amousset/ansible | test/units/plugins/strategies/test_strategy_base.py | 81 | 14839 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.strategies import StrategyBase
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.task_result import TaskResult
from six.moves import queue as Queue
from units.mock.loader import DictDataLoader
class TestStrategyBase(unittest.TestCase):
    """Unit tests for StrategyBase: host bookkeeping, worker queueing,
    result processing, included-file loading and handler runs.

    Every collaborator (TQM, inventory, iterator, workers, queues) is a
    MagicMock; no real ansible execution happens in these tests.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_strategy_base_init(self):
        """StrategyBase can be constructed from a mocked TaskQueueManager."""
        mock_tqm = MagicMock(TaskQueueManager)
        mock_tqm._final_q = MagicMock()
        mock_tqm._options = MagicMock()
        strategy_base = StrategyBase(tqm=mock_tqm)

    def test_strategy_base_run(self):
        """run() maps failed/unreachable host state to its exit codes
        (0 ok, 1 error result, 2 failed hosts, 3 unreachable hosts)."""
        mock_tqm = MagicMock(TaskQueueManager)
        mock_tqm._final_q = MagicMock()
        mock_tqm._stats = MagicMock()
        mock_tqm.send_callback.return_value = None
        mock_iterator = MagicMock()
        mock_iterator._play = MagicMock()
        mock_iterator._play.handlers = []
        mock_play_context = MagicMock()
        mock_tqm._failed_hosts = dict()
        mock_tqm._unreachable_hosts = dict()
        mock_tqm._options = MagicMock()
        strategy_base = StrategyBase(tqm=mock_tqm)
        self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context), 0)
        self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), 1)
        mock_tqm._failed_hosts = dict(host1=True)
        self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), 2)
        mock_tqm._unreachable_hosts = dict(host1=True)
        self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), 3)

    def test_strategy_base_get_hosts(self):
        """get_hosts_remaining/get_failed_hosts filter by TQM host state."""
        mock_hosts = []
        for i in range(0, 5):
            mock_host = MagicMock()
            mock_host.name = "host%02d" % (i+1)
            mock_hosts.append(mock_host)
        mock_inventory = MagicMock()
        mock_inventory.get_hosts.return_value = mock_hosts
        mock_tqm = MagicMock()
        mock_tqm._final_q = MagicMock()
        mock_tqm.get_inventory.return_value = mock_inventory
        mock_play = MagicMock()
        mock_play.hosts = ["host%02d" % (i+1) for i in range(0, 5)]
        strategy_base = StrategyBase(tqm=mock_tqm)
        mock_tqm._failed_hosts = []
        mock_tqm._unreachable_hosts = []
        self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts)
        mock_tqm._failed_hosts = ["host01"]
        self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[1:])
        self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0]])
        mock_tqm._unreachable_hosts = ["host02"]
        self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:])

    def test_strategy_base_queue_task(self):
        """_queue_task round-robins across workers and skips dead ones."""
        fake_loader = DictDataLoader()
        workers = []
        for i in range(0, 3):
            worker_main_q = MagicMock()
            worker_main_q.put.return_value = None
            worker_result_q = MagicMock()
            workers.append([i, worker_main_q, worker_result_q])
        mock_tqm = MagicMock()
        mock_tqm._final_q = MagicMock()
        mock_tqm.get_workers.return_value = workers
        mock_tqm.get_loader.return_value = fake_loader
        strategy_base = StrategyBase(tqm=mock_tqm)
        strategy_base._cur_worker = 0
        strategy_base._pending_results = 0
        strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
        self.assertEqual(strategy_base._cur_worker, 1)
        self.assertEqual(strategy_base._pending_results, 1)
        strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
        self.assertEqual(strategy_base._cur_worker, 2)
        self.assertEqual(strategy_base._pending_results, 2)
        strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
        self.assertEqual(strategy_base._cur_worker, 0)
        self.assertEqual(strategy_base._pending_results, 3)
        # A worker whose queue raises EOFError is skipped: nothing is
        # queued, but the cursor still advances past it.
        workers[0][1].put.side_effect = EOFError
        strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
        self.assertEqual(strategy_base._cur_worker, 1)
        self.assertEqual(strategy_base._pending_results, 3)

    def test_strategy_base_process_pending_results(self):
        """Result processing handles every queue item type: ok/failed/
        unreachable/skipped task results, add_host/add_group, handler
        notification, host var/fact updates and malformed items."""
        mock_tqm = MagicMock()
        mock_tqm._terminated = False
        mock_tqm._failed_hosts = dict()
        mock_tqm._unreachable_hosts = dict()
        mock_tqm.send_callback.return_value = None
        # Back the mocked final queue with a plain list so each scenario
        # below can push exactly the items it needs.
        queue_items = []
        def _queue_empty(*args, **kwargs):
            return len(queue_items) == 0
        def _queue_get(*args, **kwargs):
            if len(queue_items) == 0:
                raise Queue.Empty
            else:
                return queue_items.pop()
        mock_queue = MagicMock()
        mock_queue.empty.side_effect = _queue_empty
        mock_queue.get.side_effect = _queue_get
        mock_tqm._final_q = mock_queue
        mock_tqm._stats = MagicMock()
        mock_tqm._stats.increment.return_value = None
        mock_iterator = MagicMock()
        mock_iterator.mark_host_failed.return_value = None
        mock_host = MagicMock()
        mock_host.name = 'test01'
        mock_host.vars = dict()
        mock_task = MagicMock()
        mock_task._role = None
        mock_task.ignore_errors = False
        mock_group = MagicMock()
        mock_group.add_host.return_value = None
        def _get_host(host_name):
            if host_name == 'test01':
                return mock_host
            return None
        def _get_group(group_name):
            if group_name in ('all', 'foo'):
                return mock_group
            return None
        mock_inventory = MagicMock()
        mock_inventory._hosts_cache = dict()
        mock_inventory.get_host.side_effect = _get_host
        mock_inventory.get_group.side_effect = _get_group
        mock_inventory.clear_pattern_cache.return_value = None
        mock_var_mgr = MagicMock()
        mock_var_mgr.set_host_variable.return_value = None
        mock_var_mgr.set_host_facts.return_value = None
        strategy_base = StrategyBase(tqm=mock_tqm)
        strategy_base._inventory = mock_inventory
        strategy_base._variable_manager = mock_var_mgr
        strategy_base._blocked_hosts = dict()
        strategy_base._notified_handlers = dict()
        # Empty queue: nothing to collect.
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        # Successful task result unblocks the host.
        task_result = TaskResult(host=mock_host, task=mock_task, return_data=dict(changed=True))
        queue_items.append(('host_task_ok', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        # Failed task result marks the host failed on the TQM.
        task_result = TaskResult(host=mock_host, task=mock_task, return_data='{"failed":true}')
        queue_items.append(('host_task_failed', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test01', mock_tqm._failed_hosts)
        del mock_tqm._failed_hosts['test01']
        # Unreachable host is recorded separately from failures.
        task_result = TaskResult(host=mock_host, task=mock_task, return_data='{}')
        queue_items.append(('host_unreachable', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test01', mock_tqm._unreachable_hosts)
        del mock_tqm._unreachable_hosts['test01']
        # Skipped task result simply unblocks the host.
        task_result = TaskResult(host=mock_host, task=mock_task, return_data='{}')
        queue_items.append(('host_task_skipped', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        # Non-result items (add_host/add_group/notify/var/fact updates)
        # are side effects only: no result is returned and the host stays
        # blocked with the pending count unchanged.
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        queue_items.append(('add_host', dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        self.assertIn('test01', strategy_base._blocked_hosts)
        queue_items.append(('add_group', mock_host, dict(add_group=dict(group_name='foo'))))
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        self.assertIn('test01', strategy_base._blocked_hosts)
        task_result = TaskResult(host=mock_host, task=mock_task, return_data=dict(changed=True))
        queue_items.append(('notify_handler', task_result, 'test handler'))
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        self.assertIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test handler', strategy_base._notified_handlers)
        self.assertIn(mock_host, strategy_base._notified_handlers['test handler'])
        queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        # NOTE: ('bad') is just the string 'bad' — a deliberately
        # malformed queue item, which must raise AnsibleError.
        queue_items.append(('bad'))
        self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)

    def test_strategy_base_load_included_file(self):
        """_load_included_file parses a valid include and returns an empty
        block list for an empty/invalid one."""
        fake_loader = DictDataLoader({
            "test.yml": """
            - debug: msg='foo'
            """,
            "bad.yml": """
            """,
        })
        mock_tqm = MagicMock()
        mock_tqm._final_q = MagicMock()
        strategy_base = StrategyBase(tqm=mock_tqm)
        strategy_base._loader = fake_loader
        mock_play = MagicMock()
        mock_block = MagicMock()
        mock_block._play = mock_play
        mock_block.vars = dict()
        mock_task = MagicMock()
        mock_task._block = mock_block
        mock_task._role = None
        mock_iterator = MagicMock()
        mock_iterator.mark_host_failed.return_value = None
        mock_inc_file = MagicMock()
        mock_inc_file._task = mock_task
        mock_inc_file._filename = "test.yml"
        res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
        mock_inc_file._filename = "bad.yml"
        res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
        self.assertEqual(res, [])

    def test_strategy_base_run_handlers(self):
        """run_handlers executes notified handlers against the inventory."""
        workers = []
        for i in range(0, 3):
            worker_main_q = MagicMock()
            worker_main_q.put.return_value = None
            worker_result_q = MagicMock()
            workers.append([i, worker_main_q, worker_result_q])
        mock_tqm = MagicMock()
        mock_tqm._final_q = MagicMock()
        mock_tqm.get_workers.return_value = workers
        mock_tqm.send_callback.return_value = None
        mock_play_context = MagicMock()
        mock_handler_task = MagicMock()
        mock_handler_task.get_name.return_value = "test handler"
        mock_handler_task.has_triggered.return_value = False
        mock_handler = MagicMock()
        mock_handler.block = [mock_handler_task]
        mock_handler.flag_for_host.return_value = False
        mock_play = MagicMock()
        mock_play.handlers = [mock_handler]
        mock_host = MagicMock()
        mock_host.name = "test01"
        mock_iterator = MagicMock()
        mock_inventory = MagicMock()
        mock_inventory.get_hosts.return_value = [mock_host]
        mock_var_mgr = MagicMock()
        mock_var_mgr.get_vars.return_value = dict()
        # BUG FIX: the original re-bound ``mock_iterator = MagicMock`` (the
        # class itself, missing the call parentheses), so ``_play`` became a
        # class attribute leaking into every MagicMock created afterwards.
        # Set the attribute on the instance created above instead.
        mock_iterator._play = mock_play
        strategy_base = StrategyBase(tqm=mock_tqm)
        strategy_base._inventory = mock_inventory
        strategy_base._notified_handlers = {"test handler": [mock_host]}
        result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
OpusVL/odoo | addons/fleet/__openerp__.py | 53 | 2482 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: this file is evaluated by the module
# loader and must consist of a single dict literal.
{
    'name' : 'Fleet Management',
    'version' : '0.1',
    'author' : 'OpenERP S.A.',
    # Ordering weight in the apps/menu listing (lower = earlier).
    'sequence': 110,
    'category': 'Managing vehicles and contracts',
    'website' : 'https://www.odoo.com/page/fleet',
    'summary' : 'Vehicle, leasing, insurances, costs',
    'description' : """
Vehicle, leasing, insurances, cost
==================================
With this module, OpenERP helps you managing all your vehicles, the
contracts associated to those vehicle as well as services, fuel log
entries, costs and many other features necessary to the management
of your fleet of vehicle(s)
Main Features
-------------
* Add vehicles to your fleet
* Manage contracts for vehicles
* Reminder when a contract reach its expiration date
* Add services, fuel log entry, odometer values for all vehicles
* Show all costs associated to a vehicle or to a type of service
* Analysis graph for costs
""",
    # Modules that must be installed before this one.
    'depends' : [
        'base',
        'mail',
        'board'
    ],
    # Data files loaded at install/update time, in order (security first).
    'data' : [
        'security/fleet_security.xml',
        'security/ir.model.access.csv',
        'fleet_view.xml',
        'fleet_cars.xml',
        'fleet_data.xml',
        'fleet_board_view.xml',
    ],
    'images': ['images/costs_analysis.jpeg','images/indicative_costs_analysis.jpeg','images/vehicles.jpeg','images/vehicles_contracts.jpeg','images/vehicles_fuel.jpeg','images/vehicles_odometer.jpeg','images/vehicles_services.jpeg'],
    'demo': ['fleet_demo.xml'],
    'installable' : True,
    # Marks this module as a full application (top-level menu entry).
    'application' : True,
}
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.