| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
fairyzoro/python
|
refs/heads/master
|
Jimmy66/0014/0014.py
|
34
|
#!/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import simplejson as json
import xlwt
# Read JSON-formatted data from a file and return a dict
def read_file(filename):
with open(filename,'r') as fp:
content = fp.read()
# TODO: read up on the simplejson module; how decoding works still needs a closer look
d = json.JSONDecoder().decode(content)
return d
# Generate the corresponding xls file
def gen_xls(d,filename):
fp = xlwt.Workbook()
table = fp.add_sheet('student',cell_overwrite_ok=True)
# Tried it out: unlike many cases where data has to be converted to UTF-8 byte strings before writing to a file, xls does not want byte-encoded storage; the Unicode strings from the dict can be used directly, which is great since no explicit decode or encode is needed
# Wanted to make this more automated and reusable. A two-level loop was not the first choice, but there does not seem to be a more convenient way to write the data (e.g. automatic whole-row import with the algorithm optimized behind the scenes), so this general-purpose approach is used
for n in range(len(d)):
table.write(n,0,n+1)
m = 0
for record in d[str(n+1)]:
table.write(n,m+1,record)
m += 1
fp.save(filename)
print 'Write finished'
# Main function. In the end the "ugly double loop" was used anyway, but that's fine; it depends on the scenario and on how much optimization is needed. Since this is not search or sort, performance is not a real concern in everyday use
def main():
filename = 'student.txt'
xls_name = 'student.xls'
d = read_file(filename)
gen_xls(d,xls_name)
if __name__ == '__main__':
main()
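# Note (not in the original script): from the indexing used in gen_xls(),
# student.txt is assumed to contain a JSON object keyed by consecutive string
# numbers, each mapping to a list of cell values, e.g.
#   {"1": ["Alice", 85, 90], "2": ["Bob", 70, 88]}
# Row n of the sheet then gets the row number in column 0 and the list items
# in the following columns.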
|
JimCircadian/ansible
|
refs/heads/devel
|
lib/ansible/utils/module_docs_fragments/keycloak.py
|
43
|
# Copyright (c) 2017 Eike Frost <ei@kefro.st>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
auth_keycloak_url:
description:
- URL to the Keycloak instance.
required: true
aliases:
- url
auth_client_id:
description:
- OpenID Connect I(client_id) to authenticate to the API with.
default: admin-cli
required: true
auth_realm:
description:
- Keycloak realm name to authenticate to for API access.
required: true
auth_client_secret:
description:
- Client Secret to use in conjunction with I(auth_client_id) (if required).
auth_username:
description:
- Username to authenticate for API access with.
required: true
aliases:
- username
auth_password:
description:
- Password to authenticate for API access with.
required: true
aliases:
- password
validate_certs:
description:
- Verify TLS certificates (do not disable this in production).
default: True
type: bool
'''
|
lmprice/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/group_by.py
|
31
|
# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
class ActionModule(ActionBase):
''' Create inventory groups based on variables '''
# We need to be able to modify the inventory
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if 'key' not in self._task.args:
result['failed'] = True
result['msg'] = "the 'key' param is required when using group_by"
return result
group_name = self._task.args.get('key')
parent_groups = self._task.args.get('parents', ['all'])
if isinstance(parent_groups, string_types):
parent_groups = [parent_groups]
result['changed'] = False
result['add_group'] = group_name.replace(' ', '-')
result['parent_groups'] = [name.replace(' ', '-') for name in parent_groups]
return result
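# Illustrative note (not part of the upstream plugin): for a task such as
#   - group_by: key=os_{{ ansible_facts['distribution'] }} parents=linux
# run() would return something like
#   {'changed': False, 'add_group': 'os_Ubuntu', 'parent_groups': ['linux']},
# with any spaces in group names replaced by dashes and 'all' used as the
# parent when no parents are given.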
|
henocdz/python-social-auth
|
refs/heads/master
|
social/pipeline/__init__.py
|
72
|
DEFAULT_AUTH_PIPELINE = (
# Get the information we can about the user and return it in a simple
# format to create the user instance later. On some cases the details are
# already part of the auth response from the provider, but sometimes this
# could hit a provider API.
'social.pipeline.social_auth.social_details',
# Get the social uid from whichever service we're authing thru. The uid is
# the unique identifier of the given user in the provider.
'social.pipeline.social_auth.social_uid',
# Verifies that the current auth process is valid within the current
# project; this is where email and domain whitelists are applied (if
# defined).
'social.pipeline.social_auth.auth_allowed',
# Checks if the current social-account is already associated in the site.
'social.pipeline.social_auth.social_user',
# Make up a username for this person, appending a random string at the end if
# there's any collision.
'social.pipeline.user.get_username',
# Send a validation email to the user to verify its email address.
# 'social.pipeline.mail.mail_validation',
# Associates the current social details with another user account with
# a similar email address.
# 'social.pipeline.social_auth.associate_by_email',
# Create a user account if we haven't found one yet.
'social.pipeline.user.create_user',
# Create the record that associates the social account with this user.
'social.pipeline.social_auth.associate_user',
# Populate the extra_data field in the social record with the values
# specified by settings (and the default ones like access_token, etc).
'social.pipeline.social_auth.load_extra_data',
# Update the user record with any changed info from the auth service.
'social.pipeline.user.user_details'
)
DEFAULT_DISCONNECT_PIPELINE = (
# Verifies that the social association can be disconnected from the current
# user (ensure that the user login mechanism is not compromised by this
# disconnection).
'social.pipeline.disconnect.allowed_to_disconnect',
# Collects the social associations to disconnect.
'social.pipeline.disconnect.get_entries',
# Revoke any access_token when possible.
'social.pipeline.disconnect.revoke_tokens',
# Removes the social associations.
'social.pipeline.disconnect.disconnect'
)
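# A minimal sketch (not part of the original module) of a custom step that
# could be appended to DEFAULT_AUTH_PIPELINE. Pipeline steps are plain
# callables invoked with keyword arguments describing the auth state; the
# exact kwargs (backend, details, user, ...) depend on earlier steps, and the
# strategy.redirect call is assumed to be available as in the usual strategies.
def require_email(strategy=None, details=None, *args, **kwargs):
    """Stop and redirect when the provider returned no email address."""
    if strategy is not None and not (details or {}).get('email'):
        return strategy.redirect('/email-required/')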
|
oss/shrunk
|
refs/heads/master
|
backend/setup.py
|
1
|
#!/usr/bin/env python3
from typing import Any
import fnmatch
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as _build_py
VERSION = '2.0.0'
AUTHOR = 'Rutgers Open System Solutions'
class build_py(_build_py):
EXCLUDED = ['shrunk/config.py']
def find_package_modules(self, package: Any, package_dir: Any) -> Any:
modules = super().find_package_modules(package, package_dir)
return (mod for mod in modules if not any(fnmatch.fnmatchcase(mod[2], pat) for pat in self.EXCLUDED))
CMDCLASS = {
'build_py': build_py,
}
COMMAND_OPTIONS = {}
try:
from sphinx.setup_command import BuildDoc
CMDCLASS['build_sphinx'] = BuildDoc # type: ignore
COMMAND_OPTIONS['build_sphinx'] = {
'project': ('setup.py', 'shrunk'),
'version': ('setup.py', VERSION),
'source_dir': ('setup.py', 'doc'),
}
except ImportError:
pass
with open('requirements.txt', 'r') as f:
requires = [line.rstrip() for line in f]
with open('../README.md', 'r') as f:
readme = f.read()
setup(
name='shrunk',
version=VERSION,
packages=find_packages(),
package_data={'shrunk': ['static/dist/*', 'static/img/*', 'static/css/*', 'templates/*']},
include_package_data=True,
zip_safe=False,
install_requires=requires,
author=AUTHOR,
author_email='oss@oss.rutgers.edu',
description='Rutgers University URL Shortener',
long_description=readme,
long_description_content_type='text/markdown',
keywords='shrunk rutgers url shortener',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
],
url='https://github.com/oss/shrunk',
command_options=COMMAND_OPTIONS,
cmdclass=CMDCLASS,
)
|
joakim-hove/django
|
refs/heads/master
|
tests/test_utils/views.py
|
481
|
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template import Context, Template
from .models import Person
def get_person(request, pk):
person = get_object_or_404(Person, pk=pk)
return HttpResponse(person.name)
def no_template_used(request):
template = Template("This is a string-based template")
return HttpResponse(template.render(Context({})))
def empty_response(request):
return HttpResponse('')
|
IKholopov/HackUPC2017
|
refs/heads/master
|
hackupc/env/lib/python3.5/site-packages/django/contrib/gis/geometry/regex.py
|
657
|
import re
# Regular expression for recognizing HEXEWKB and WKT. A prophylactic measure
# to prevent potentially malicious input from reaching the underlying C
# library. Not a substitute for good Web security programming practices.
hex_regex = re.compile(r'^[0-9A-F]+$', re.I)
wkt_regex = re.compile(r'^(SRID=(?P<srid>\-?\d+);)?'
r'(?P<wkt>'
r'(?P<type>POINT|LINESTRING|LINEARRING|POLYGON|MULTIPOINT|'
r'MULTILINESTRING|MULTIPOLYGON|GEOMETRYCOLLECTION)'
r'[ACEGIMLONPSRUTYZ\d,\.\-\(\) ]+)$',
re.I)
json_regex = re.compile(r'^(\s+)?\{.*}(\s+)?$', re.DOTALL)
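# A minimal usage sketch (not part of the original module); run this file
# directly to see the named groups the patterns expose.
if __name__ == '__main__':
    match = wkt_regex.match('SRID=4326;POINT (5 23)')
    print(match.group('srid'))   # '4326'
    print(match.group('type'))   # 'POINT'
    print(bool(json_regex.match('{"type": "Point"}')))  # True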
|
zolegus/neon
|
refs/heads/master
|
neon/backends/tests/test_randomstate.py
|
10
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
from neon.backends import gen_backend
from neon.backends.tests.utils import assert_tensors_allclose
def test_gpu_randomstate():
# run 1
be = gen_backend(backend='gpu', rng_seed=100)
a = be.empty((3, 3))
a[:] = be.rand() # gpu rand
x0 = a.get()
x1 = be.rng.rand(3, 3) # host rand
a[:] = be.rand() # gpu rand
x2 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
x3 = a.get()
assert len(be.context_rand_state_map) == 1 and len(be.context_rand_state_alive) == 1
for ctx in be.context_rand_state_alive:
assert be.context_rand_state_alive[ctx] is True
# run 2, using reset
be.rng_reset()
for ctx in be.context_rand_state_alive:
assert be.context_rand_state_alive[ctx] is False
a[:] = be.rand()
y0 = a.get()
y1 = be.rng.rand(3, 3)
a[:] = be.rand()
y2 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
y3 = a.get()
assert len(be.context_rand_state_map) == 1 and len(be.context_rand_state_alive) == 1
for ctx in be.context_rand_state_alive:
assert be.context_rand_state_alive[ctx] is True
# run 3, using a new backend
be = gen_backend(backend='gpu', rng_seed=100)
a = be.empty((3, 3))
a[:] = be.rand() # gpu rand
z0 = a.get()
z1 = be.rng.rand(3, 3) # host rand
a[:] = be.rand() # gpu rand
z2 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
z3 = a.get()
# check equality
assert_tensors_allclose([x0, x1, x2, x3], [y0, y1, y2, y3], rtol=0., atol=0.)
assert_tensors_allclose([x0, x1, x2, x3], [z0, z1, z2, z3], rtol=0., atol=0.)
def test_cpu_randomstate():
# run 1
be = gen_backend(backend='cpu', rng_seed=100)
a = be.empty((3, 3))
be.make_binary_mask(a, keepthresh=be.rng.rand())
x0 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
x1 = a.get()
# run 2, using reset
be.rng_reset()
be.make_binary_mask(a, keepthresh=be.rng.rand())
y0 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
y1 = a.get()
# run 3, using a new backend
be = gen_backend(backend='cpu', rng_seed=100)
a = be.empty((3, 3))
be.make_binary_mask(a, keepthresh=be.rng.rand())
z0 = a.get()
be.make_binary_mask(a, keepthresh=be.rng.rand())
z1 = a.get()
# check equality
# import ipdb; ipdb.set_trace()
assert_tensors_allclose([x0, x1], [y0, y1], rtol=0., atol=0.)
assert_tensors_allclose([x0, x1], [z0, z1], rtol=0., atol=0.)
|
mstriemer/zamboni
|
refs/heads/master
|
mkt/constants/ratingsbodies.py
|
13
|
# -*- coding: utf-8 -*-
from tower import ugettext_lazy as _lazy
NAME_GENERAL = _lazy('For all ages')
# L10n: %d is the age in years. For ages %d and higher.
NAME_LAZY = _lazy('For ages %d+') # Fill this in after accessing.
NAME_REJECTED = _lazy(u'Rating Rejected')
NAME_PENDING = _lazy(u'Rating Pending')
class RATING(object):
"""
Content rating.
iarc_name -- how IARC names the rating, to talk with IARC.
age -- minimum age of the rating's age recommendation.
name -- how we name the rating, for translated display on all pages.
label -- for CSS classes, to create icons.
"""
age = None
name = None
label = None
adult = False
class RATING_BODY(object):
"""
Content rating body.
iarc_name -- how IARC names the ratings body, to talk with IARC.
ratings -- list of RATINGs associated with this body.
name -- for general translated display on all pages.
label -- for CSS classes, to create icons.
description -- for general translated display on all pages.
full_name -- in case we ever want to display the full translated name.
url -- in case we ever want to link to the ratings body page for more info.
"""
label = None
class CLASSIND_L(RATING):
id = 0
age = 0
iarc_name = 'Livre'
class CLASSIND_10(RATING):
id = 1
age = 10
iarc_name = '10+'
class CLASSIND_12(RATING):
id = 2
age = 12
iarc_name = '12+'
class CLASSIND_14(RATING):
id = 3
age = 14
iarc_name = '14+'
class CLASSIND_16(RATING):
id = 4
age = 16
iarc_name = '16+'
class CLASSIND_18(RATING):
id = 5
age = 18
iarc_name = '18+'
adult = True
class CLASSIND(RATING_BODY):
"""
The Brazilian game ratings body (aka. DEJUS, DJCTQ).
"""
id = 0
iarc_name = 'CLASSIND'
ratings = (CLASSIND_L, CLASSIND_10, CLASSIND_12, CLASSIND_14, CLASSIND_16,
CLASSIND_18)
name = 'CLASSIND'
description = _lazy(u'Brazil')
full_name = _lazy(u'Department of Justice, Rating, Titles and '
u'Qualification')
url = ('http://portal.mj.gov.br/classificacao/data/Pages/'
'MJ6BC270E8PTBRNN.htm')
class GENERIC_3(RATING):
id = 0
age = 3
iarc_name = '3+'
class GENERIC_7(RATING):
id = 1
age = 7
iarc_name = '7+'
class GENERIC_12(RATING):
id = 2
age = 12
iarc_name = '12+'
class GENERIC_16(RATING):
id = 3
age = 16
iarc_name = '16+'
class GENERIC_18(RATING):
id = 4
age = 18
iarc_name = '18+'
adult = True
class GENERIC_RP(RATING):
id = 5
iarc_name = 'RP'
label = 'pending'
name = NAME_PENDING
class GENERIC(RATING_BODY):
"""
The generic game ratings body (used in Germany, for example).
"""
id = 1
iarc_name = 'Generic'
ratings = (GENERIC_3, GENERIC_7, GENERIC_12, GENERIC_16, GENERIC_18,
GENERIC_RP)
name = _lazy('Generic')
description = '' # No comment.
full_name = _lazy(u'Generic')
class USK_0(RATING):
id = 0
age = 0
iarc_name = '0+'
class USK_6(RATING):
id = 1
age = 6
iarc_name = '6+'
class USK_12(RATING):
id = 2
age = 12
iarc_name = '12+'
class USK_16(RATING):
id = 3
age = 16
iarc_name = '16+'
class USK_18(RATING):
id = 4
age = 18
iarc_name = '18+'
adult = True
class USK_REJECTED(RATING):
id = 5
iarc_name = 'Rating Refused'
label = 'rating-refused'
name = NAME_REJECTED
class USK(RATING_BODY):
"""
The organization responsible for game ratings in Germany
(aka. Unterhaltungssoftware Selbstkontrolle).
"""
id = 2
iarc_name = 'USK'
ratings = (USK_0, USK_6, USK_12, USK_16, USK_18, USK_REJECTED)
name = 'USK'
description = _lazy(u'Germany')
full_name = _lazy(u'Entertainment Software Self-Regulation Body')
url = 'http://www.usk.de/en/'
class ESRB_E(RATING):
"""Everybody."""
id = 0
age = 0
iarc_name = 'Everyone'
name = _lazy('Everyone')
class ESRB_10(RATING):
id = 1
age = 10
iarc_name = 'Everyone 10+'
name = _lazy('Everyone 10+') # L10n: `10+` is age ten and over.
class ESRB_T(RATING):
id = 2
age = 13
iarc_name = 'Teen'
name = _lazy('Teen')
class ESRB_M(RATING):
id = 3
age = 17
iarc_name = 'Mature 17+'
name = _lazy('Mature 17+') # L10n: `17+` is age seventeen and over.
class ESRB_A(RATING):
id = 4
age = 18
iarc_name = 'Adults Only'
name = _lazy('Adults Only 18+') # L10n: `18+` is age eighteen and over.
adult = True
class ESRB(RATING_BODY):
"""
The North American game ratings body (i.e. USA, Canada).
"""
id = 3
iarc_name = 'ESRB'
ratings = (ESRB_E, ESRB_10, ESRB_T, ESRB_M, ESRB_A)
name = 'ESRB'
# L10n: North and South America, but not Brazil.
description = _lazy(u'All Americas except Brazil')
full_name = _lazy(u'Entertainment Software Rating Board')
url = 'http://esrb.org'
class PEGI_3(RATING):
id = 0
age = 3
iarc_name = '3+'
class PEGI_7(RATING):
id = 1
age = 7
iarc_name = '7+'
class PEGI_12(RATING):
id = 2
age = 12
iarc_name = '12+'
class PEGI_16(RATING):
id = 3
age = 16
iarc_name = '16+'
class PEGI_18(RATING):
id = 4
age = 18
iarc_name = '18+'
adult = True
class PEGI(RATING_BODY):
"""
The European game ratings body (i.e. GBR, Poland, Spain).
"""
id = 4
iarc_name = 'PEGI'
ratings = (PEGI_3, PEGI_7, PEGI_12, PEGI_16, PEGI_18)
name = 'PEGI'
description = _lazy(u'Europe')
full_name = _lazy(u'Pan European Game Information')
url = 'http://www.pegi.info'
RATINGS_BODIES = {
CLASSIND.id: CLASSIND,
GENERIC.id: GENERIC,
USK.id: USK,
ESRB.id: ESRB,
PEGI.id: PEGI,
}
# Attach ratings bodies to ratings.
for rb in RATINGS_BODIES.values():
for r in rb.ratings:
r.ratingsbody = rb
ALL_RATINGS_BODIES = [CLASSIND, GENERIC, USK, ESRB, PEGI]
def ALL_RATINGS():
"""
List of all ratings with waffled bodies.
"""
ALL_RATINGS = []
for rb in RATINGS_BODIES.values():
ALL_RATINGS.extend(rb.ratings)
return ALL_RATINGS
def RATINGS_BY_NAME():
"""
Create a list of tuples (choices) after we know the locale since this
attempts to concatenate two lazy translations in a constants file.
"""
all_ratings = ALL_RATINGS()
ratings_choices = []
for rb in RATINGS_BODIES.values():
for r in rb.ratings:
ratings_choices.append(
(all_ratings.index(r),
u'%s - %s' % (rb.name, dehydrate_rating(r).name)))
return ratings_choices
def slugify_iarc_name(obj):
"""
Converts ratings body's or rating's iarc_name to a slug-like label
(e.g. "USK" to "usk").
"""
return obj.iarc_name.lower().replace(' ', '-')
def dehydrate_rating(rating_class):
"""
Returns a rating with translated fields attached and with fields that are
easily created dynamically.
"""
rating = rating_class()
if rating.label is None:
rating.label = str(rating.age) or slugify_iarc_name(rating)
if rating.name is None:
if rating.age == 0:
rating.name = unicode(NAME_GENERAL)
else:
rating.name = unicode(NAME_LAZY) % rating.age
rating.name = unicode(rating.name)
return rating
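# Illustrative behaviour (not part of the original module):
# dehydrate_rating(ESRB_T) yields a rating with label '13' and name u'Teen',
# while dehydrate_rating(GENERIC_RP) keeps its preset label 'pending' and
# gets the name u'Rating Pending'.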
def dehydrate_ratings_body(body_class):
"""Returns a rating body with translated fields attached."""
body = body_class()
if body.label is None:
body.label = slugify_iarc_name(body)
body.name = unicode(body.name)
body.description = unicode(body.description)
return body
def pth(path):
"""Prepends root icon path to path."""
return 'img/icons/ratings/' + path
IARC_ICONS = {
'ratings': {
# The keys are ratings' labels.
'classind': {
'0': pth('CLASSIND_L.png'),
'10': pth('CLASSIND_10.png'),
'12': pth('CLASSIND_12.png'),
'14': pth('CLASSIND_14.png'),
'16': pth('CLASSIND_16.png'),
'18': pth('CLASSIND_18.png'),
},
'esrb': {
'0': pth('ESRB_e.png'),
'10': pth('ESRB_e10.png'),
'13': pth('ESRB_t.png'),
'17': pth('ESRB_m.png'),
'18': pth('ESRB_ao.png'),
},
'generic': {
'3': pth('generic_3.png'),
'7': pth('generic_7.png'),
'12': pth('generic_12.png'),
'16': pth('generic_16.png'),
'18': pth('generic_18.png'),
'pending': pth('generic_rp.png'),
},
'pegi': {
'3': pth('pegi_3.png'),
'7': pth('pegi_7.png'),
'12': pth('pegi_12.png'),
'16': pth('pegi_16.png'),
'18': pth('pegi_18.png'),
},
'usk': {
'0': pth('USK_0.png'),
'6': pth('USK_6.png'),
'12': pth('USK_12.png'),
'16': pth('USK_16.png'),
'18': pth('USK_18.png'),
'rating-refused': pth('USK_RR.png')
}
},
'descriptors': {
'pegi': {
'has_pegi_discrimination':
pth('descriptors/pegi_discrimination.png'),
'has_pegi_drugs': pth('descriptors/pegi_drugs.png'),
'has_pegi_gambling': pth('descriptors/pegi_gambling.png'),
'has_pegi_lang': pth('descriptors/pegi_language.png'),
'has_pegi_nudity': pth('descriptors/pegi_nudity.png'),
'has_pegi_online': pth('descriptors/pegi_online.png'),
'has_pegi_scary': pth('descriptors/pegi_fear.png'),
'has_pegi_sex_content': pth('descriptors/pegi_sex.png'),
'has_pegi_violence': pth('descriptors/pegi_violence.png'),
'has_pegi_digital_purchases': pth(
'descriptors/pegi_inapp_purchase_option.png'),
'has_pegi_shares_info': pth(
'descriptors/pegi_personal_data_sharing.png'),
'has_pegi_shares_location': pth(
'descriptors/pegi_location_data_sharing.png'),
'has_pegi_users_interact': pth(
'descriptors/pegi_social_interaction_functionality.png'),
}
},
'interactive_elements': {
'has_shares_info': pth('interactives/ESRB_shares-info_small.png'),
'has_shares_location':
pth('interactives/ESRB_shares-location_small.png'),
'has_users_interact':
pth('interactives/ESRB_users-interact_small.png'),
'has_digital_purchases': pth(
'interactives/ESRB_digital-purchases_small.png'),
}
}
|
bschoenfeld/virginia-court-data-analysis
|
refs/heads/master
|
past_due_costs_vs_income.py
|
1
|
import csv
import sys
from os import environ, listdir
from os.path import isfile, join
from pprint import pprint
from census import Census
import numpy as np
import matplotlib.pyplot as plt
'''
Graph Past Due Court Costs vs Median Income By Zipcode
- Get income data from Census at tract level
- Group court cases by zipcode
- Get tracts (and income) for each zipcode
- Graph the results
'''
# Store court data csv files
path = sys.argv[1]
files = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
# Get income data for Virginia at tract level
c = Census(environ['CENSUS_API_KEY'])
response = c.acs5.state_county_tract('B07011_001E', 51, '*', '*')
income_data = {
x['county'] + '_' + x['tract']: int(x['B07011_001E'])
for x in response if x['B07011_001E'] is not None
}
# Read through court data. For each case, group by zipcode and
# note whether the case had costs or fines and whether they were paid or past due.
cases = 0
cases_with_fines = 0
cases_past_due = 0
cases_paid = 0
costs_by_zipcodes = {}
for f in files:
if not f.endswith('.csv'):
continue
print f
with open(f) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
cases += 1
if row['FineCostsDue'] != '' or row['FineCostsPaid'] != '':
zipcode = row['Address'].split(' ')[-1]
if len(zipcode) != 5:
continue
if zipcode not in costs_by_zipcodes:
costs_by_zipcodes[zipcode] = {
'count': 0,
'paid': 0,
'pastDue': 0,
'tracts': 0,
'outOfState': 0,
'noIncome': 0,
'incomes': []
}
cases_with_fines += 1
costs_by_zipcodes[zipcode]['count'] += 1
if 'PAST DUE' in row['FineCostsDue']:
cases_past_due += 1
costs_by_zipcodes[zipcode]['pastDue'] += 1
elif 'Paid' in row['FineCostsPaid']:
cases_paid += 1
costs_by_zipcodes[zipcode]['paid'] += 1
print 'Cases', cases
print 'With fines', int(float(cases_with_fines)/cases*100), '%'
print 'Paid', int(float(cases_paid)/cases_with_fines*100), '%'
print 'Past Due', int(float(cases_past_due)/cases_with_fines*100), '%'
# Use 2010 ZCTA to Census Tract Relationship File Layout (zcta_tract_rel_10.txt)
# from https://www.census.gov/geo/maps-data/data/zcta_rel_layout.html
# to get incomes for each zipcode
good_zips = set()
with open('data/zcta_tract_rel_10.txt') as f:
reader = csv.DictReader(f)
for row in reader:
good_zips.add(row['ZCTA5'])
if row['ZCTA5'] in costs_by_zipcodes:
zipcode = costs_by_zipcodes[row['ZCTA5']]
zipcode['tracts'] += 1
if row['STATE'] != '51':
zipcode['outOfState'] += 1
else:
tract_key = row['COUNTY'] + '_' + row['TRACT']
if tract_key in income_data:
zipcode['incomes'].append(income_data[tract_key])
else:
zipcode['noIncome'] += 1
# Aggregate income data for each zipcode and take note of how many cases
# we won't be able to include in the final graph due to bad zipcodes or
# no income data
has_income = 0
no_income = 0
no_tracts = 0
out_of_state = 0
bad_zip = 0
bad_zips = []
costsVsIncomes = []
for key in costs_by_zipcodes:
zipcode = costs_by_zipcodes[key]
if len(zipcode['incomes']) > 0:
zipcode['minIncome'] = np.min(zipcode['incomes'])
zipcode['maxIncome'] = np.max(zipcode['incomes'])
zipcode['meanIncome'] = np.mean(zipcode['incomes'])
zipcode['pastDueRatio'] = float(zipcode['pastDue']) / zipcode['count'] * 100
zipcode['paidRatio'] = float(zipcode['paid']) / zipcode['count'] * 100
costsVsIncomes.append((zipcode['pastDueRatio'], zipcode['meanIncome']))
has_income += zipcode['count']
if key not in good_zips:
bad_zip += zipcode['count']
bad_zips.append(key)
else:
if zipcode['tracts'] == 0:
no_tracts += zipcode['count']
elif zipcode['tracts'] == zipcode['outOfState']:
out_of_state += zipcode['count']
elif zipcode['tracts'] == zipcode['noIncome']:
no_income += zipcode['count']
print 'Has Income', int(float(has_income)/cases_past_due*100), '%'
print 'Out of state', int(float(out_of_state)/cases_past_due*100), '%'
print 'Bad zips', int(float(bad_zip)/cases_past_due*100), '%'
# Create graphs
costsVsIncomes.sort(key=lambda x: x[0])
plt.plot([x[0] for x in costsVsIncomes], [x[1] for x in costsVsIncomes], 'b.')
plt.xlabel('Percentage of Cases with Past Due Costs (excluding cases with no costs / fines)')
plt.ylabel('Median Income')
plt.title('Past Due Court Costs vs Median Income By Zipcode')
plt.show()
|
shawnadelic/shuup
|
refs/heads/master
|
shuup/core/fields/__init__.py
|
2
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import decimal
import numbers
import babel
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import BLANK_CHOICE_DASH
from django.forms.widgets import NumberInput
from django.utils.translation import ugettext_lazy as _
from jsonfield.fields import JSONField
from shuup.core.fields.tagged_json import tag_registry, TaggedJSONEncoder
from shuup.utils.i18n import get_current_babel_locale
IdentifierValidator = RegexValidator("[a-z][a-z_]+")
class InternalIdentifierField(models.CharField):
def __init__(self, **kwargs):
if "unique" not in kwargs:
raise ValueError("You must explicitly set the `unique` flag for `InternalIdentifierField`s.")
kwargs.setdefault("max_length", 64)
kwargs.setdefault("blank", True)
kwargs.setdefault("null", bool(kwargs.get("blank"))) # If it's allowed to be blank, it should be null
kwargs.setdefault("verbose_name", _("internal identifier"))
kwargs.setdefault("help_text", _(u"Do not change this value if you are not sure what you're doing."))
kwargs.setdefault("editable", False)
super(InternalIdentifierField, self).__init__(**kwargs)
self.validators.append(IdentifierValidator)
def get_prep_value(self, value):
# Save `None`s instead of falsy values (such as empty strings)
# for `InternalIdentifierField`s to avoid `IntegrityError`s on unique fields.
prepared_value = super(InternalIdentifierField, self).get_prep_value(value)
if self.null:
return (prepared_value or None)
return prepared_value
def deconstruct(self):
(name, path, args, kwargs) = super(InternalIdentifierField, self).deconstruct()
kwargs["null"] = self.null
kwargs["unique"] = self.unique
kwargs["blank"] = self.blank
# Irrelevant for migrations, and usually translated anyway:
kwargs.pop("verbose_name", None)
kwargs.pop("help_text", None)
return (name, path, args, kwargs)
class CurrencyField(models.CharField):
def __init__(self, **kwargs):
kwargs.setdefault("max_length", 4)
super(CurrencyField, self).__init__(**kwargs)
class FormattedDecimalFormField(forms.DecimalField):
# Chrome automatically converts a step with more than 5 decimal places to scientific notation
MAX_DECIMAL_PLACES_FOR_STEP = 5
def widget_attrs(self, widget):
# be more lenient when setting step than the default django widget_attrs
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places <= self.MAX_DECIMAL_PLACES_FOR_STEP:
step = format(decimal.Decimal('1') / 10 ** self.decimal_places, 'f')
else:
step = 'any'
widget.attrs.setdefault('step', step)
return super(FormattedDecimalFormField, self).widget_attrs(widget)
class FormattedDecimalField(models.DecimalField):
"""
DecimalField subclass to display decimal values in non-scientific
format.
"""
def value_from_object(self, obj):
value = super(FormattedDecimalField, self).value_from_object(obj)
if isinstance(value, numbers.Number):
return self.format_decimal(decimal.Decimal(str(value)))
def format_decimal(self, value, max_digits=100, exponent_limit=100):
assert isinstance(value, decimal.Decimal)
val = value.normalize()
(sign, digits, exponent) = val.as_tuple()
if exponent > exponent_limit:
raise ValueError('Exponent too large for formatting: %r' % value)
elif exponent < -exponent_limit:
raise ValueError('Exponent too small for formatting: %r' % value)
if len(digits) > max_digits:
raise ValueError('Too many digits for formatting: %r' % value)
return format(val, 'f')
def formfield(self, **kwargs):
kwargs.setdefault("form_class", FormattedDecimalFormField)
return super(FormattedDecimalField, self).formfield(**kwargs)
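# Illustrative behaviour of format_decimal() (not part of the original
# module): Decimal('1E-9') is rendered as '0.000000001' and Decimal('2E+3')
# as '2000', so values never appear in scientific notation.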
class MoneyValueField(FormattedDecimalField):
def __init__(self, **kwargs):
kwargs.setdefault("decimal_places", 9)
kwargs.setdefault("max_digits", 36)
super(MoneyValueField, self).__init__(**kwargs)
class QuantityField(FormattedDecimalField):
def __init__(self, **kwargs):
kwargs.setdefault("decimal_places", 9)
kwargs.setdefault("max_digits", 36)
kwargs.setdefault("default", 0)
super(QuantityField, self).__init__(**kwargs)
class MeasurementField(FormattedDecimalField):
KNOWN_UNITS = ("mm", "m", "kg", "g", "m3")
def __init__(self, unit, **kwargs):
if unit not in self.KNOWN_UNITS:
raise ImproperlyConfigured("Unit %r is not a known unit." % unit)
self.unit = unit
kwargs.setdefault("decimal_places", 9)
kwargs.setdefault("max_digits", 36)
kwargs.setdefault("default", 0)
super(MeasurementField, self).__init__(**kwargs)
def deconstruct(self):
parent = super(MeasurementField, self)
(name, path, args, kwargs) = parent.deconstruct()
kwargs["unit"] = self.unit
return (name, path, args, kwargs)
class LanguageField(models.CharField):
# TODO: This list will include extinct languages
LANGUAGE_CODES = set(babel.Locale("en").languages.keys())
def __init__(self, *args, **kwargs):
kwargs.setdefault("max_length", 10)
kwargs["choices"] = [(code, code) for code in sorted(self.LANGUAGE_CODES)]
super(LanguageField, self).__init__(*args, **kwargs)
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
locale = get_current_babel_locale()
translated_choices = [
(code, locale.languages.get(code, code))
for (code, _)
in super(LanguageField, self).get_choices(include_blank, blank_choice)
]
translated_choices.sort(key=lambda pair: pair[1].lower())
return translated_choices
# https://docs.djangoproject.com/en/1.8/ref/models/fields/#django.db.models.ForeignKey.allow_unsaved_instance_assignment
class UnsavedForeignKey(models.ForeignKey):
allow_unsaved_instance_assignment = True
class TaggedJSONField(JSONField):
def __init__(self, *args, **kwargs):
dump_kwargs = kwargs.setdefault("dump_kwargs", {})
dump_kwargs.setdefault("cls", TaggedJSONEncoder)
dump_kwargs.setdefault("separators", (',', ':'))
load_kwargs = kwargs.setdefault("load_kwargs", {})
load_kwargs.setdefault("object_hook", tag_registry.decode)
super(TaggedJSONField, self).__init__(*args, **kwargs)
|
wkentaro/chainer
|
refs/heads/master
|
chainer/functions/array/repeat.py
|
9
|
import six
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Repeat(function_node.FunctionNode):
"""Repeat elements of an array."""
def __init__(self, repeats, axis=None):
if isinstance(repeats, six.integer_types):
self.repeats = (repeats,)
elif isinstance(repeats, tuple) and all(
isinstance(x, six.integer_types) for x in repeats):
# Although it is not explicitly documented, NumPy/CuPy allows
# specifying bool or tuple of bools as `repeats`.
# Thus we just check type against `six.integer_types`, without
# excluding `bool`.
self.repeats = repeats
else:
raise TypeError('repeats must be int or tuple of ints')
if not all(x >= 0 for x in self.repeats):
raise ValueError('all elements in repeats must be zero or larger')
if axis is not None and (
not isinstance(axis, six.integer_types) or
isinstance(axis, bool)):
# `axis` cannot be bool, in contrast to `repeats`.
raise TypeError('axis must be int or None')
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward(self, inputs):
self.retain_inputs((0,))
x, = inputs
xp = backend.get_array_module(x)
repeats = self.repeats
# Workaround for a bug in NumPy 1.9 where specifying a one-element list
# as `repeats` fails to broadcast.
if len(repeats) == 1:
repeats = repeats[0]
return xp.repeat(x, repeats, self.axis),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
return RepeatGrad(self.repeats, self.axis, x.shape, x.dtype).apply(
grad_outputs)
class RepeatGrad(function_node.FunctionNode):
def __init__(self, repeats, axis, in_shape, in_dtype):
self.repeats = repeats
self.axis = axis
if axis is not None and axis < 0:
self.axis += len(in_shape)
self.in_shape = in_shape
self.in_dtype = in_dtype
def forward(self, inputs):
gy, = inputs
xp = backend.get_array_module(gy)
repeats = self.repeats
axis = self.axis
shape = list(self.in_shape)
dtype = self.in_dtype
if len(gy) == 0:
gx = xp.zeros(shape, dtype)
return gx,
if len(repeats) == 1:
repeats = int(repeats[0])
if axis is None:
gx = gy.reshape(-1, repeats).sum(axis=1).reshape(shape)
else:
shape[axis:axis + 1] = [-1, repeats]
gx = gy.reshape(shape).sum(axis=axis + 1)
return gx,
if axis is None:
pos = 0
gx = xp.zeros(utils.size_of_shape(shape), dtype)
for (i, r) in enumerate(repeats):
gx[i] = xp.sum(gy[pos:pos + r])
pos += r
gx = gx.reshape(shape)
else:
gx = xp.zeros(shape, dtype)
pos = 0
src = [slice(None)] * axis + [None]
dst = [slice(None)] * axis + [None]
for (i, r) in enumerate(repeats):
src[-1] = slice(pos, pos + r)
dst[-1] = slice(i, i + 1)
gx[tuple(dst)] = gy[tuple(src)].sum(axis=axis, keepdims=True)
pos += r
return gx,
def backward(self, indexes, grad_outputs):
return Repeat(self.repeats, self.axis).apply(grad_outputs)
def repeat(x, repeats, axis=None):
"""Construct an array by repeating a given array.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable.
repeats (:class:`int` or :class:`tuple` of :class:`int` s):
The number of times which each element of ``x`` is repeated.
axis (:class:`int`):
The axis along which to repeat values.
Returns:
~chainer.Variable: The repeated output Variable.
.. admonition:: Example
>>> x = np.array([0, 1, 2])
>>> x.shape
(3,)
>>> y = F.repeat(x, 2)
>>> y.shape
(6,)
>>> y.array
array([0, 0, 1, 1, 2, 2])
>>> x = np.array([[1,2], [3,4]])
>>> x.shape
(2, 2)
>>> y = F.repeat(x, 3, axis=1)
>>> y.shape
(2, 6)
>>> y.array
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> y = F.repeat(x, (1, 2), axis=0)
>>> y.shape
(3, 2)
>>> y.array
array([[1, 2],
[3, 4],
[3, 4]])
"""
return Repeat(repeats, axis).apply((x,))[0]
|
simod/geonode
|
refs/heads/master
|
geonode/catalogue/backends/pycsw_local.py
|
1
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
from lxml import etree
from django.conf import settings
from ConfigParser import SafeConfigParser
from owslib.iso import MD_Metadata
from pycsw import server
from geonode.catalogue.backends.generic import CatalogueBackend as GenericCatalogueBackend
from geonode.catalogue.backends.generic import METADATA_FORMATS
from shapely.geometry.base import ReadingError
true_value = 'true'
if settings.DATABASES['default']['ENGINE'].endswith(('sqlite', 'sqlite3', 'spatialite',)):
true_value = '1'
# pycsw settings that the user shouldn't have to worry about
CONFIGURATION = {
'server': {
'home': '.',
'url': settings.CATALOGUE['default']['URL'],
'encoding': 'UTF-8',
'language': settings.LANGUAGE_CODE,
'maxrecords': '10',
# 'loglevel': 'DEBUG',
# 'logfile': '/tmp/pycsw.log',
# 'federatedcatalogues': 'http://geo.data.gov/geoportal/csw/discovery',
'pretty_print': 'true',
'domainquerytype': 'range',
'domaincounts': 'true',
'profiles': 'apiso,ebrim',
},
'repository': {
'source': 'geonode.catalogue.backends.pycsw_plugin.GeoNodeRepository',
'filter': 'is_published = %s' % true_value,
'mappings': os.path.join(os.path.dirname(__file__), 'pycsw_local_mappings.py')
}
}
class CatalogueBackend(GenericCatalogueBackend):
def __init__(self, *args, **kwargs):
super(CatalogueBackend, self).__init__(*args, **kwargs)
self.catalogue.formats = ['Atom', 'DIF', 'Dublin Core', 'ebRIM', 'FGDC', 'ISO']
self.catalogue.local = True
def remove_record(self, uuid):
pass
def create_record(self, item):
pass
def get_record(self, uuid):
results = self._csw_local_dispatch(identifier=uuid)
if len(results) < 1:
return None
result = etree.fromstring(results).find('{http://www.isotc211.org/2005/gmd}MD_Metadata')
if result is None:
return None
record = MD_Metadata(result)
record.keywords = []
if hasattr(record, 'identification') and hasattr(record.identification, 'keywords'):
for kw in record.identification.keywords:
record.keywords.extend(kw['keywords'])
record.links = {}
record.links['metadata'] = self.catalogue.urls_for_uuid(uuid)
record.links['download'] = self.catalogue.extract_links(record)
return record
def search_records(self, keywords, start, limit, bbox):
with self.catalogue:
lresults = self._csw_local_dispatch(keywords, keywords, start+1, limit, bbox)
# serialize XML
e = etree.fromstring(lresults)
self.catalogue.records = \
[MD_Metadata(x) for x in e.findall('//{http://www.isotc211.org/2005/gmd}MD_Metadata')]
# build results into JSON for API
results = [self.catalogue.metadatarecord2dict(doc) for v, doc in self.catalogue.records.iteritems()]
result = {'rows': results,
'total': e.find('{http://www.opengis.net/cat/csw/2.0.2}SearchResults').attrib.get(
'numberOfRecordsMatched'),
'next_page': e.find('{http://www.opengis.net/cat/csw/2.0.2}SearchResults').attrib.get(
'nextRecord')
}
return result
def _csw_local_dispatch(self, keywords=None, start=0, limit=10, bbox=None, identifier=None):
"""
HTTP-less CSW
"""
# serialize pycsw settings into SafeConfigParser
# object for interaction with pycsw
mdict = dict(settings.PYCSW['CONFIGURATION'], **CONFIGURATION)
if 'server' in settings.PYCSW['CONFIGURATION']:
# override server system defaults with user specified directives
mdict['server'].update(settings.PYCSW['CONFIGURATION']['server'])
config = SafeConfigParser()
for section, options in mdict.iteritems():
config.add_section(section)
for option, value in options.iteritems():
config.set(section, option, value)
# fake HTTP environment variable
os.environ['QUERY_STRING'] = ''
# init pycsw
csw = server.Csw(config, version='2.0.2')
# fake HTTP method
csw.requesttype = 'GET'
# fake HTTP request parameters
if identifier is None: # it's a GetRecords request
formats = []
for f in self.catalogue.formats:
formats.append(METADATA_FORMATS[f][0])
csw.kvp = {
'service': 'CSW',
'version': '2.0.2',
'elementsetname': 'full',
'typenames': formats,
'resulttype': 'results',
'constraintlanguage': 'CQL_TEXT',
'outputschema': 'http://www.isotc211.org/2005/gmd',
'constraint': None,
'startposition': start,
'maxrecords': limit
}
response = csw.getrecords()
else: # it's a GetRecordById request
csw.kvp = {
'service': 'CSW',
'version': '2.0.2',
'request': 'GetRecordById',
'id': identifier,
'outputschema': 'http://www.isotc211.org/2005/gmd',
}
# FIXME(Ariel): Remove this try/except block when pycsw deals with
# empty geometry fields better.
# https://gist.github.com/ingenieroariel/717bb720a201030e9b3a
try:
response = csw.dispatch()
except ReadingError:
return []
if isinstance(response, list): # pycsw 2.0+
response = response[1]
return response
|
uppsaladatavetare/foobar-api
|
refs/heads/develop
|
src/foobar/migrations/0019_auto_20170221_1547.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-21 15:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('foobar', '0018_auto_20170220_1322'),
]
operations = [
migrations.AlterModelOptions(
name='walletlogentry',
options={'verbose_name': 'wallet log entry', 'verbose_name_plural': 'wallet log entries'},
),
migrations.AlterField(
model_name='walletlogentry',
name='wallet',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='log_entries', to='wallet.Wallet'),
),
]
|
jasonbot/django
|
refs/heads/master
|
tests/utils_tests/models.py
|
188
|
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=100)
def next(self):
return self
class Thing(models.Model):
name = models.CharField(max_length=100)
category = models.ForeignKey(Category, models.CASCADE)
|
uglyboxer/linear_neuron
|
refs/heads/master
|
net-p3/lib/python3.5/site-packages/matplotlib/fontconfig_pattern.py
|
11
|
"""
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <mdroe@stsci.edu>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import re, sys
from pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException as e:
raise ValueError(
"Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
self._parser.resetCache()
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
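# A minimal usage sketch (not part of the original module); the exact keys in
# the result depend on the constants table above.
if __name__ == '__main__':
    props = parse_fontconfig_pattern('DejaVu Sans-12:bold:italic')
    # Expected to contain roughly:
    # {'family': ['DejaVu Sans'], 'size': ['12.0'],
    #  'weight': ['bold'], 'slant': ['italic']}
    print(props)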
|
PetrDlouhy/django-crispy-forms
|
refs/heads/dev
|
crispy_forms/base.py
|
10
|
# -*- coding: utf-8 -*-
def from_iterable(iterables):
"""
Backport of `itertools.chain.from_iterable` compatible with Python 2.5
"""
for it in iterables:
for element in it:
if isinstance(element, dict):
for key in element:
yield key
else:
yield element
class KeepContext(object):
"""
Context manager that receives a `django.template.Context` instance, tracks its changes
and rolls them back when exiting the context manager, leaving the context unchanged.
Layout objects can introduce context variables that may cause side effects in later
layout objects. This avoids that situation without copying the context every time.
"""
def __init__(self, context):
self.context = context
def __enter__(self):
self.old_set_keys = set(from_iterable(self.context.dicts))
def __exit__(self, type, value, traceback):
current_set_keys = set(from_iterable(self.context.dicts))
diff_keys = current_set_keys - self.old_set_keys
# We remove added keys for rolling back changes
for key in diff_keys:
self._delete_key_from_context(key)
def _delete_key_from_context(self, key):
for d in self.context.dicts:
if key in d:
del d[key]
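# Illustrative usage (not part of the original module): wrapping a render in
# KeepContext removes any variables a layout object pushed onto the context,
# e.g.
#   with KeepContext(context):
#       context['extra'] = 'temporary'
#   # 'extra' has been removed from `context` again at this point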
|
andreiw/xen3-arm-tegra
|
refs/heads/master
|
tools/python/xen/xend/XendProtocol.py
|
3
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd.
#============================================================================
import socket
import httplib
import time
import types
from encode import *
import sxp
from xen.xend import XendRoot
DEBUG = 0
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NO_CONTENT = 204
xroot = XendRoot.instance()
class XendError(RuntimeError):
"""Error class for 'expected errors' when talking to xend.
"""
pass
class XendRequest:
"""A request to xend.
"""
def __init__(self, url, method, args):
"""Create a request. Sets up the headers, argument data, and the
url.
@param url: the url to request
@param method: request method, GET or POST
@param args: dict containing request args, if any
"""
if url.proto != 'http':
raise ValueError('Invalid protocol: ' + url.proto)
(hdr, data) = encode_data(args)
if args and method == 'GET':
url.query = data
data = None
if method == "POST" and url.path.endswith('/'):
url.path = url.path[:-1]
self.headers = hdr
self.data = data
self.url = url
self.method = method
class XendClientProtocol:
"""Abstract class for xend clients.
"""
def xendRequest(self, url, method, args=None):
"""Make a request to xend.
Implement in a subclass.
@param url: xend request url
@param method: http method: POST or GET
@param args: request arguments (dict)
"""
raise NotImplementedError()
def xendGet(self, url, args=None):
"""Make a xend request using HTTP GET.
Requests using GET are usually 'safe' and may be repeated without
nasty side-effects.
@param url: xend request url
@param data: request arguments (dict)
"""
return self.xendRequest(url, "GET", args)
def xendPost(self, url, args):
"""Make a xend request using HTTP POST.
Requests using POST potentially cause side-effects, and should
not be repeated unless you really want to repeat the side
effect.
@param url: xend request url
@param args: request arguments (dict)
"""
return self.xendRequest(url, "POST", args)
def handleStatus(self, _, status, message):
"""Handle the status returned from the request.
"""
status = int(status)
if status in [ HTTP_NO_CONTENT ]:
return None
if status not in [ HTTP_OK, HTTP_CREATED, HTTP_ACCEPTED ]:
return self.handleException(XendError(message))
return 'ok'
def handleResponse(self, data):
"""Handle the data returned in response to the request.
"""
if data is None: return None
typ = self.getHeader('Content-Type')
if typ != sxp.mime_type:
return data
try:
pin = sxp.Parser()
pin.input(data);
pin.input_eof()
val = pin.get_val()
except sxp.ParseError, err:
return self.handleException(err)
if isinstance(val, types.ListType) and sxp.name(val) == 'xend.err':
err = XendError(val[1])
return self.handleException(err)
return val
def handleException(self, err):
"""Handle an exception during the request.
May be overridden in a subclass.
"""
raise err
def getHeader(self, key):
"""Get a header from the response.
Case is ignored in the key.
@param key: header key
@return: header
"""
raise NotImplementedError()
class HttpXendClientProtocol(XendClientProtocol):
"""A synchronous xend client. This will make a request, wait for
the reply and return the result.
"""
resp = None
request = None
def makeConnection(self, url):
return httplib.HTTPConnection(url.location())
def makeRequest(self, url, method, args):
return XendRequest(url, method, args)
def xendRequest(self, url, method, args=None):
"""Make a request to xend.
@param url: xend request url
@param method: http method: POST or GET
@param args: request arguments (dict)
"""
retries = 0
while retries < 2:
self.request = self.makeRequest(url, method, args)
conn = self.makeConnection(url)
try:
if DEBUG: conn.set_debuglevel(1)
conn.request(method, url.fullpath(), self.request.data,
self.request.headers)
try:
resp = conn.getresponse()
self.resp = resp
val = self.handleStatus(resp.version, resp.status,
resp.reason)
if val is None:
data = None
else:
data = resp.read()
val = self.handleResponse(data)
return val
except httplib.BadStatusLine:
retries += 1
time.sleep(5)
finally:
conn.close()
raise XendError("Received invalid response from Xend, twice.")
def getHeader(self, key):
return self.resp.getheader(key)
class UnixConnection(httplib.HTTPConnection):
"""Subclass of Python library HTTPConnection that uses a unix-domain socket.
"""
def __init__(self, path):
httplib.HTTPConnection.__init__(self, 'localhost')
self.path = path
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
class UnixXendClientProtocol(HttpXendClientProtocol):
"""A synchronous xend client using a unix-domain socket.
"""
def __init__(self, path=None):
if path is None:
path = xroot.get_xend_unix_path()
self.path = path
def makeConnection(self, _):
return UnixConnection(self.path)
|
cst13/canstel
|
refs/heads/master
|
tx.py
|
2
|
from moneywagon import (
get_unspent_outputs, get_current_price, CurrentPrice, get_optimal_fee, PushTx,
get_onchain_exchange_rates
)
from moneywagon.core import get_optimal_services, get_magic_bytes
from bitcoin import mktx, sign, pubtoaddr, privtopub
from .crypto_data import crypto_data
class Transaction(object):
def __init__(self, crypto, hex=None, verbose=False):
if crypto.lower() in ['nxt']:
raise NotImplementedError("%s not yet supported" % crypto.upper())
self.change_address = None
self.crypto = crypto
self.fee_satoshi = None
self.outs = []
self.ins = []
self.onchain_rate = None
self.verbose = verbose
if hex:
self.hex = hex
def from_unit_to_satoshi(self, value, unit='satoshi'):
"""
Convert a value to satoshis. Units can be any fiat currency.
By default the unit is satoshi.
"""
if not unit or unit == 'satoshi':
return value
if unit == 'bitcoin' or unit == 'btc':
return value * 1e8
# assume fiat currency that we can convert
convert = get_current_price(self.crypto, unit)
return int(value / convert * 1e8)
def add_raw_inputs(self, inputs, private_key=None):
"""
Add a set of UTXOs to this transaction. This method is better to use if you
want finer control over which inputs get added to a transaction.
`inputs` is a list of "unspent outputs" (they were 'outputs' to previous transactions,
and 'inputs' to subsequent transactions).
`private_key` - All inputs will be signed by the passed in private key.
"""
for i in inputs:
self.ins.append(dict(input=i, private_key=private_key))
self.change_address = i['address']
def _get_utxos(self, address, services, **modes):
"""
Using the service fallback engine, get utxos from remote service.
"""
return get_unspent_outputs(
self.crypto, address, services=services,
**modes
)
def private_key_to_address(self, pk):
"""
Convert a private key (in hex format) into an address.
"""
pub = privtopub(pk)
pub_byte, priv_byte = get_magic_bytes(self.crypto)
if priv_byte >= 128:
priv_byte -= 128 #pybitcointools bug
return pubtoaddr(pub, pub_byte)
def add_inputs(self, private_key=None, address=None, amount='all', max_ins=None, password=None, services=None, **modes):
"""
        Make a call to an external service to get inputs from an address and/or private_key.
        `amount` is the amount of [currency] worth of inputs (in satoshis) to add from
        this address. Pass in 'all' (the default) to use *all* inputs found for this address.
        Returns a tuple of (satoshis added as inputs to this tx, number of inputs added).
"""
if private_key:
if private_key.startswith('6P'):
if not password:
raise Exception("Password required for BIP38 encoded private keys")
from .bip38 import Bip38EncryptedPrivateKey
private_key = Bip38EncryptedPrivateKey(self.crypto, private_key).decrypt(password)
address_from_priv = self.private_key_to_address(private_key)
if address and address != address_from_priv:
raise Exception("Invalid Private key")
address = address_from_priv
self.change_address = address
if not services:
services = get_optimal_services(self.crypto, 'unspent_outputs')
total_added_satoshi = 0
ins = 0
for utxo in self._get_utxos(address, services, **modes):
if max_ins and ins >= max_ins:
break
if (amount == 'all' or total_added_satoshi < amount):
ins += 1
self.ins.append(
dict(input=utxo, private_key=private_key)
)
total_added_satoshi += utxo['amount']
return total_added_satoshi, ins
def total_input_satoshis(self):
"""
Add up all the satoshis coming from all input tx's.
"""
just_inputs = [x['input'] for x in self.ins]
return sum([x['amount'] for x in just_inputs])
def select_inputs(self, amount):
'''Maximize transaction priority. Select the oldest inputs,
that are sufficient to cover the spent amount. Then,
remove any unneeded inputs, starting with
the smallest in value.
Returns sum of amounts of inputs selected'''
sorted_txin = sorted(self.ins, key=lambda x:-x['input']['confirmations'])
total_amount = 0
for (idx, tx_in) in enumerate(sorted_txin):
total_amount += tx_in['input']['amount']
if (total_amount >= amount):
break
sorted_txin = sorted(sorted_txin[:idx+1], key=lambda x:x['input']['amount'])
for (idx, tx_in) in enumerate(sorted_txin):
value = tx_in['input']['amount']
if (total_amount - value < amount):
break
else:
total_amount -= value
self.ins = sorted_txin[idx:]
return total_amount
def add_output(self, address, value, unit='satoshi'):
"""
Add an output (a person who will receive funds via this tx).
If no unit is specified, satoshi is implied.
"""
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f)" % (
value_satoshi, (value_satoshi / 1e8)
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def onchain_exchange(self, withdraw_crypto, withdraw_address, value, unit='satoshi'):
"""
        This method is like `add_output`, but the funds are sent to another
        cryptocurrency via an on-chain exchange service.
"""
self.onchain_rate = get_onchain_exchange_rates(
self.crypto, withdraw_crypto, best=True, verbose=self.verbose
)
exchange_rate = float(self.onchain_rate['rate'])
result = self.onchain_rate['service'].get_onchain_exchange_address(
self.crypto, withdraw_crypto, withdraw_address
)
address = result['deposit']
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f) via onchain exchange, converting to %s %s" % (
value_satoshi, (value_satoshi / 1e8),
exchange_rate * value_satoshi / 1e8, withdraw_crypto.upper()
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def fee(self, value=None, unit='satoshi'):
"""
        Set the miner fee. If unit is not set, the value is assumed to be in satoshi.
If using 'optimal', make sure you have already added all outputs.
"""
convert = None
if not value:
# no fee was specified, use $0.02 as default.
convert = get_current_price(self.crypto, "usd")
self.fee_satoshi = int(0.02 / convert * 1e8)
verbose = "Using default fee of:"
elif value == 'optimal':
self.fee_satoshi = get_optimal_fee(
self.crypto, self.estimate_size(), verbose=self.verbose
)
verbose = "Using optimal fee of:"
else:
self.fee_satoshi = self.from_unit_to_satoshi(value, unit)
verbose = "Using manually set fee of:"
if self.verbose:
if not convert:
convert = get_current_price(self.crypto, "usd")
fee_dollar = convert * self.fee_satoshi / 1e8
print(verbose + " %s satoshis ($%.2f)" % (self.fee_satoshi, fee_dollar))
def estimate_size(self):
"""
        Estimate how many bytes this transaction will be by counting inputs
and outputs.
Formula taken from: http://bitcoin.stackexchange.com/a/3011/18150
"""
# if there are no outs use 1 (because the change will be an out)
outs = len(self.outs) or 1
return outs * 34 + 148 * len(self.ins) + 10
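    # Illustrative arithmetic (added note): a transaction with 2 inputs and 2
    # outputs is estimated at 2*34 + 148*2 + 10 = 374 bytes.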
def get_hex(self, signed=True):
"""
Given all the data the user has given so far, make the hex using pybitcointools
"""
total_ins_satoshi = self.total_input_satoshis()
if total_ins_satoshi == 0:
raise ValueError("Can't make transaction, there are zero inputs")
# Note: there can be zero outs (sweep or coalesc transactions)
total_outs_satoshi = sum([x['value'] for x in self.outs])
if not self.fee_satoshi:
self.fee() # use default of $0.02
change_satoshi = total_ins_satoshi - (total_outs_satoshi + self.fee_satoshi)
if change_satoshi < 0:
raise ValueError(
"Input amount (%s) must be more than all output amounts (%s) plus fees (%s). You need more %s."
% (total_ins_satoshi, total_outs_satoshi, self.fee_satoshi, self.crypto.upper())
)
ins = [x['input'] for x in self.ins]
if change_satoshi > 0:
if self.verbose:
print("Adding change address of %s satoshis to %s" % (change_satoshi, self.change_address))
change = [{'value': change_satoshi, 'address': self.change_address}]
else:
change = [] # no change ?!
if self.verbose: print("Inputs == Outputs, no change address needed.")
tx = mktx(ins, self.outs + change)
if signed:
for i, input_data in enumerate(self.ins):
if not input_data['private_key']:
raise Exception("Can't sign transaction, missing private key for input %s" % i)
tx = sign(tx, i, input_data['private_key'])
return tx
def push(self, services=None, redundancy=1):
if not services:
services = get_optimal_services(self.crypto, "push_tx")
self.pushers = []
pusher = PushTx(services=services, verbose=self.verbose)
results = [pusher.action(self.crypto, self.get_hex())]
try:
for service in services[1:redundancy-1]:
pusher = PushTx(services=[service], verbose=self.verbose)
                results.append(pusher.action(self.crypto, self.get_hex()))
self.pushers.append(pusher)
except:
raise Exception("Partial push. Some services returned success, some failed.")
return results
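# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of driving the Transaction class above end to end. The key
# and address are placeholders, and the calls hit live third-party block
# explorers, so treat this purely as an illustration.
def _example_spend():
    tx = Transaction('btc', verbose=True)
    tx.add_inputs(private_key='<WIF private key>')            # placeholder key
    tx.add_output('<recipient address>', 0.001, unit='btc')   # placeholder address
    tx.fee('optimal')                                          # fee from estimated size
    return tx.get_hex()  # signed hex, ready to broadcast with tx.push()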
|
simonwydooghe/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_log_setting.py
|
7
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_setting
short_description: Configure general log settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log feature and setting category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
log_setting:
description:
- Configure general log settings.
default: null
type: dict
suboptions:
brief_traffic_format:
description:
- Enable/disable brief format traffic logging.
type: str
choices:
- enable
- disable
custom_log_fields:
description:
- Custom fields to append to all log messages.
type: list
suboptions:
field_id:
description:
- Custom log field. Source log.custom-field.id.
type: str
daemon_log:
description:
- Enable/disable daemon logging.
type: str
choices:
- enable
- disable
expolicy_implicit_log:
description:
- Enable/disable explicit proxy firewall implicit policy logging.
type: str
choices:
- enable
- disable
fwpolicy_implicit_log:
description:
- Enable/disable implicit firewall policy logging.
type: str
choices:
- enable
- disable
fwpolicy6_implicit_log:
description:
- Enable/disable implicit firewall policy6 logging.
type: str
choices:
- enable
- disable
local_in_allow:
description:
- Enable/disable local-in-allow logging.
type: str
choices:
- enable
- disable
local_in_deny_broadcast:
description:
- Enable/disable local-in-deny-broadcast logging.
type: str
choices:
- enable
- disable
local_in_deny_unicast:
description:
- Enable/disable local-in-deny-unicast logging.
type: str
choices:
- enable
- disable
local_out:
description:
- Enable/disable local-out logging.
type: str
choices:
- enable
- disable
log_invalid_packet:
description:
- Enable/disable invalid packet traffic logging.
type: str
choices:
- enable
- disable
log_policy_comment:
description:
- Enable/disable inserting policy comments into traffic logs.
type: str
choices:
- enable
- disable
log_policy_name:
description:
- Enable/disable inserting policy name into traffic logs.
type: str
choices:
- enable
- disable
log_user_in_upper:
description:
- Enable/disable logs with user-in-upper.
type: str
choices:
- enable
- disable
neighbor_event:
description:
- Enable/disable neighbor event logging.
type: str
choices:
- enable
- disable
resolve_ip:
description:
- Enable/disable adding resolved domain names to traffic logs if possible.
type: str
choices:
- enable
- disable
resolve_port:
description:
- Enable/disable adding resolved service names to traffic logs.
type: str
choices:
- enable
- disable
user_anonymize:
description:
- Enable/disable anonymizing user names in log messages.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure general log settings.
fortios_log_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_setting:
brief_traffic_format: "enable"
custom_log_fields:
-
field_id: "<your_own_value> (source log.custom-field.id)"
daemon_log: "enable"
expolicy_implicit_log: "enable"
fwpolicy_implicit_log: "enable"
fwpolicy6_implicit_log: "enable"
local_in_allow: "enable"
local_in_deny_broadcast: "enable"
local_in_deny_unicast: "enable"
local_out: "enable"
log_invalid_packet: "enable"
log_policy_comment: "enable"
log_policy_name: "enable"
log_user_in_upper: "enable"
neighbor_event: "enable"
resolve_ip: "enable"
resolve_port: "enable"
user_anonymize: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_log_setting_data(json):
option_list = ['brief_traffic_format', 'custom_log_fields', 'daemon_log',
'expolicy_implicit_log', 'fwpolicy_implicit_log', 'fwpolicy6_implicit_log',
'local_in_allow', 'local_in_deny_broadcast', 'local_in_deny_unicast',
'local_out', 'log_invalid_packet', 'log_policy_comment',
'log_policy_name', 'log_user_in_upper', 'neighbor_event',
'resolve_ip', 'resolve_port', 'user_anonymize']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
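# Illustrative note (added): underscore_to_hyphen({'brief_traffic_format': 'enable'})
# returns {'brief-traffic-format': 'enable'}, recursing into nested dicts and lists
# so the Ansible-style keys match the hyphenated FortiOS API field names.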
def log_setting(data, fos):
vdom = data['vdom']
log_setting_data = data['log_setting']
filtered_data = underscore_to_hyphen(filter_log_setting_data(log_setting_data))
return fos.set('log',
'setting',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log(data, fos):
if data['log_setting']:
resp = log_setting(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"log_setting": {
"required": False, "type": "dict", "default": None,
"options": {
"brief_traffic_format": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"custom_log_fields": {"required": False, "type": "list",
"options": {
"field_id": {"required": False, "type": "str"}
}},
"daemon_log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"expolicy_implicit_log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fwpolicy_implicit_log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fwpolicy6_implicit_log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_in_allow": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_in_deny_broadcast": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_in_deny_unicast": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_out": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"log_invalid_packet": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"log_policy_comment": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"log_policy_name": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"log_user_in_upper": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"neighbor_event": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"resolve_ip": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"resolve_port": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"user_anonymize": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_log(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_log(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
artwr/airflow
|
refs/heads/master
|
airflow/operators/latest_only_operator.py
|
3
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.models.skipmixin import SkipMixin
from airflow.utils import timezone
class LatestOnlyOperator(BaseOperator, SkipMixin):
"""
Allows a workflow to skip tasks that are not running during the most
recent schedule interval.
If the task is run outside of the latest schedule interval, all
directly downstream tasks will be skipped.
"""
ui_color = '#e9ffdb' # nyanza
def execute(self, context):
# If the DAG Run is externally triggered, then return without
# skipping downstream tasks
if context['dag_run'] and context['dag_run'].external_trigger:
self.log.info("Externally triggered DAG_Run: allowing execution to proceed.")
return
now = timezone.utcnow()
left_window = context['dag'].following_schedule(
context['execution_date'])
right_window = context['dag'].following_schedule(left_window)
self.log.info(
'Checking latest only with left_window: %s right_window: %s now: %s',
left_window, right_window, now
)
if not left_window < now <= right_window:
self.log.info('Not latest execution, skipping downstream.')
downstream_tasks = context['task'].get_flat_relatives(upstream=False)
self.log.debug("Downstream task_ids %s", downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'],
context['ti'].execution_date,
downstream_tasks)
self.log.info('Done.')
else:
self.log.info('Latest, allowing execution to proceed.')
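# --- Hedged usage sketch (not part of the original file) ---
# How this operator is typically wired into a DAG so downstream work only runs
# for the most recent schedule interval; DAG arguments below are illustrative.
#
#   from airflow import DAG
#   from airflow.operators.dummy_operator import DummyOperator
#   from airflow.operators.latest_only_operator import LatestOnlyOperator
#   from airflow.utils.dates import days_ago
#
#   with DAG('latest_only_example', schedule_interval='@daily',
#            start_date=days_ago(7)) as dag:
#       latest_only = LatestOnlyOperator(task_id='latest_only')
#       final = DummyOperator(task_id='final')
#       latest_only >> final   # `final` is skipped for non-latest intervals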
|
felipenaselva/felipe.repository
|
refs/heads/master
|
plugin.video.streamhub/resources/lib/modules/jsunpack.py
|
67
|
"""
urlresolver XBMC Addon
Copyright (C) 2013 Bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Adapted for use in xbmc from:
https://github.com/einars/js-beautify/blob/master/python/jsbeautifier/unpackers/packer.py
usage:
if detect(some_string):
unpacked = unpack(some_string)
Unpacker for Dean Edwards' p.a.c.k.e.r
"""
import re
def detect(source):
"""Detects whether `source` is P.A.C.K.E.R. coded."""
source = source.replace(' ', '')
    if re.search(r'eval\(function\(p,a,c,k,e,(?:r|d)', source): return True
else: return False
def unpack(source):
"""Unpacks P.A.C.K.E.R. packed js code."""
payload, symtab, radix, count = _filterargs(source)
if count != len(symtab):
raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
try:
unbase = Unbaser(radix)
except TypeError:
raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')
def lookup(match):
"""Look up symbols in the synthetic symtab."""
word = match.group(0)
return symtab[unbase(word)] or word
source = re.sub(r'\b\w+\b', lookup, payload)
return _replacestrings(source)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
argsregex = (r"}\s*\('(.*)',\s*(.*?),\s*(\d+),\s*'(.*?)'\.split\('\|'\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
payload, radix, count, symtab = args
radix = 36 if not radix.isdigit() else int(radix)
return payload, symtab.split('|'), radix, int(count)
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
"""Functor for a given base. Will efficiently convert
strings to natural numbers."""
ALPHABET = {
62: '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
95: (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
}
def __init__(self, base):
self.base = base
# If base can be handled by int() builtin, let it do it for us
if 2 <= base <= 36:
self.unbase = lambda string: int(string, base)
else:
if base < 62:
self.ALPHABET[base] = self.ALPHABET[62][0:base]
elif 62 < base < 95:
self.ALPHABET[base] = self.ALPHABET[95][0:base]
# Build conversion dictionary cache
try:
self.dictionary = dict((cipher, index) for index, cipher in enumerate(self.ALPHABET[base]))
except KeyError:
raise TypeError('Unsupported base encoding.')
self.unbase = self._dictunbaser
def __call__(self, string):
return self.unbase(string)
def _dictunbaser(self, string):
"""Decodes a value to an integer."""
ret = 0
for index, cipher in enumerate(string[::-1]):
ret += (self.base ** index) * self.dictionary[cipher]
return ret
class UnpackingError(Exception):
"""Badly packed source or general error. Argument is a
meaningful description."""
pass
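# Illustrative note (added): for bases that int() cannot handle, Unbaser falls
# back to the dictionary lookup above, e.g. Unbaser(62)('a') == 10 and
# Unbaser(62)('10') == 62 under the packer's base-62 alphabet.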
if __name__ == "__main__":
# test = '''eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('4(\'30\').2z({2y:\'5://a.8.7/i/z/y/w.2x\',2w:{b:\'2v\',19:\'<p><u><2 d="20" c="#17">2u 19.</2></u><16/><u><2 d="18" c="#15">2t 2s 2r 2q.</2></u></p>\',2p:\'<p><u><2 d="20" c="#17">2o 2n b.</2></u><16/><u><2 d="18" c="#15">2m 2l 2k 2j.</2></u></p>\',},2i:\'2h\',2g:[{14:"11",b:"5://a.8.7/2f/13.12"},{14:"2e",b:"5://a.8.7/2d/13.12"},],2c:"11",2b:[{10:\'2a\',29:\'5://v.8.7/t-m/m.28\'},{10:\'27\'}],26:{\'25-3\':{\'24\':{\'23\':22,\'21\':\'5://a.8.7/i/z/y/\',\'1z\':\'w\',\'1y\':\'1x\'}}},s:\'5://v.8.7/t-m/s/1w.1v\',1u:"1t",1s:"1r",1q:\'1p\',1o:"1n",1m:"1l",1k:\'5\',1j:\'o\',});l e;l k=0;l 6=0;4().1i(9(x){f(6>0)k+=x.r-6;6=x.r;f(q!=0&&k>=q){6=-1;4().1h();4().1g(o);$(\'#1f\').j();$(\'h.g\').j()}});4().1e(9(x){6=-1});4().1d(9(x){n(x)});4().1c(9(){$(\'h.g\').j()});9 n(x){$(\'h.g\').1b();f(e)1a;e=1;}',36,109,'||font||jwplayer|http|p0102895|me|vidto|function|edge3|file|color|size|vvplay|if|video_ad|div||show|tt102895|var|player|doPlay|false||21600|position|skin|test||static|1y7okrqkv4ji||00020|01|type|360p|mp4|video|label|FFFFFF|br|FF0000||deleted|return|hide|onComplete|onPlay|onSeek|play_limit_box|setFullscreen|stop|onTime|dock|provider|391|height|650|width|over|controlbar|5110|duration|uniform|stretching|zip|stormtrooper|213|frequency|prefix||path|true|enabled|preview|timeslidertooltipplugin|plugins|html5|swf|src|flash|modes|hd_default|3bjhohfxpiqwws4phvqtsnolxocychumk274dsnkblz6sfgq6uz6zt77gxia|240p|3bjhohfxpiqwws4phvqtsnolxocychumk274dsnkba36sfgq6uzy3tv2oidq|hd|original|ratio|broken|is|link|Your|such|No|nofile|more|any|availabe|Not|File|OK|previw|jpg|image|setup|flvplayer'.split('|')))'''
# test = '''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('y.x(A(\'%0%f%b%9%1%d%8%8%o%e%B%c%0%e%d%0%f%w%1%7%3%2%p%d%1%n%2%1%c%0%t%0%f%7%8%8%d%5%6%1%7%e%b%l%7%1%2%e%9%q%c%0%6%1%z%2%0%f%b%1%9%c%0%s%6%6%l%G%4%4%5%5%5%k%b%7%5%8%o%i%2%k%6%i%4%2%3%p%2%n%4%5%7%6%9%s%4%j%q%a%h%a%3%a%E%a%3%D%H%9%K%C%I%m%r%g%h%L%v%g%u%F%r%g%3%J%3%j%3%m%h%4\'));',48,48,'22|72|65|6d|2f|77|74|61|6c|63|4e|73|3d|6f|6e|20|4d|32|76|59|2e|70|51|64|69|62|79|31|68|30|7a|34|66|write|document|75|unescape|67|4f|5a|57|55|3a|44|47|4a|78|49'.split('|'),0,{}))'''
# test = '''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('x.w(z(\'%1%f%9%b%0%d%7%7%m%e%A%c%1%e%d%1%f%v%0%3%i%2%o%d%0%s%2%0%c%1%q%1%f%3%7%7%d%6%5%0%3%e%9%l%3%0%2%e%b%g%c%1%5%0%y%2%1%f%9%0%b%c%1%r%5%5%l%E%4%4%6%6%6%n%9%3%6%7%m%k%2%n%5%k%4%2%i%o%2%s%4%6%3%5%b%r%4%8%D%h%C%a%F%8%H%B%I%h%i%a%g%8%u%a%q%j%t%j%g%8%t%h%p%j%p%a%G%4\'));',45,45,'72|22|65|61|2f|74|77|6c|5a|73|55|63|3d|6f|6e|20|79|59|6d|4d|76|70|69|2e|62|7a|30|68|64|44|54|66|write|document|75|unescape|67|51|32|6a|3a|35|5f|47|34'.split('|'),0,{}))'''
test = '''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('q.r(s(\'%h%t%a%p%u%6%c%n%0%5%l%4%2%4%7%j%0%8%1%o%b%3%7%m%1%8%a%7%b%3%d%6%1%f%0%v%1%5%D%9%0%5%c%g%0%4%A%9%0%f%k%z%2%8%1%C%2%i%d%6%2%3%k%j%2%3%y%e%x%w%g%B%E%F%i%h%e\'));',42,42,'5a|4d|4f|54|6a|44|33|6b|57|7a|56|4e|68|55|3e|47|69|65|6d|32|45|46|31|6f|30|75|document|write|unescape|6e|62|6c|2f|3c|22|79|63|66|78|59|72|61'.split('|'),0,{}))'''
print unpack(test)
|
nmabhi/Webface
|
refs/heads/master
|
api-docs/conf.py
|
9
|
#!/usr/bin/env python2
import sys
import mock
import os
sys.path.insert(0, os.path.abspath('..'))
MOCK_MODULES = ['argparse', 'cv2', 'dlib', 'numpy', 'numpy.linalg', 'pandas']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.MagicMock()
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
autoclass_content = 'both'
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
project = 'OpenFace API Docs'
copyright = '2015-2016, Carnegie Mellon University'
author = 'Carnegie Mellon University'
version = '0.1.1'
release = '0.1.1'
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = True
def setup(app):
app.add_javascript("sp.js")
app.add_javascript("track.js")
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'OpenFacedoc'
latex_elements = {
'papersize': 'letterpaper',
'pointsize': '12pt',
}
latex_documents = [
(master_doc, 'OpenFace.tex', 'OpenFace Documentation',
'Carnegie Mellon University', 'manual'),
]
man_pages = [
(master_doc, 'openface', 'OpenFace Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'OpenFace', 'OpenFace Documentation',
author, 'OpenFace', 'One line description of project.',
'Miscellaneous'),
]
|
75651/kbengine_cloud
|
refs/heads/master
|
kbe/res/scripts/common/Lib/lib2to3/__init__.py
|
737
|
#empty
|
TeMPO-Consulting/mediadrop
|
refs/heads/axitube
|
mediacore/config/environment.py
|
1
|
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2013 MediaCore Inc., Felix Schwarz and other contributors.
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""Pylons environment configuration"""
import os
from formencode.api import get_localedir as get_formencode_localedir
from genshi.filters.i18n import Translator
import pylons
from pylons import translator
from pylons.configuration import PylonsConfig
from sqlalchemy import engine_from_config
import mediacore.lib.app_globals as app_globals
import mediacore.lib.helpers
from mediacore.config.routing import make_map
from mediacore.lib.templating import TemplateLoader
from mediacore.model import Media, Podcast, init_model
from mediacore.plugin import PluginManager, events
def load_environment(global_conf, app_conf):
"""Configure the Pylons environment via the ``pylons.config`` object"""
config = PylonsConfig()
# Pylons paths
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'public'),
templates=[os.path.join(root, 'templates')])
# Initialize config with the basic options
config.init_app(global_conf, app_conf, package='mediacore', paths=paths)
# Initialize the plugin manager to load all active plugins
plugin_mgr = PluginManager(config)
mapper = make_map(config, plugin_mgr.controller_scan)
events.Environment.routes(mapper)
config['routes.map'] = mapper
config['pylons.app_globals'] = app_globals.Globals(config)
config['pylons.app_globals'].plugin_mgr = plugin_mgr
config['pylons.app_globals'].events = events
config['pylons.h'] = mediacore.lib.helpers
# Setup cache object as early as possible
pylons.cache._push_object(config['pylons.app_globals'].cache)
config['locale_dirs'] = plugin_mgr.locale_dirs()
config['locale_dirs'].update({
'mediacore': os.path.join(root, 'i18n'),
'FormEncode': get_formencode_localedir(),
})
def enable_i18n_for_template(template):
translations = Translator(translator)
translations.setup(template)
# Create the Genshi TemplateLoader
config['pylons.app_globals'].genshi_loader = TemplateLoader(
search_path=paths['templates'] + plugin_mgr.template_loaders(),
auto_reload=True,
max_cache_size=100,
callback=enable_i18n_for_template,
)
# Setup the SQLAlchemy database engine
engine = engine_from_config(config, 'sqlalchemy.')
init_model(engine, config.get('db_table_prefix', None))
events.Environment.init_model()
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
# TODO: Move as many of these custom options into an .ini file, or at least
# to somewhere more friendly.
# TODO: Rework templates not to rely on this line:
# See docstring in pylons.configuration.PylonsConfig for details.
config['pylons.strict_tmpl_context'] = False
config['thumb_sizes'] = { # the dimensions (in pixels) to scale thumbnails
Media._thumb_dir: {
's': (128, 72),
'm': (160, 90),
'l': (560, 315),
},
Podcast._thumb_dir: {
's': (128, 128),
'm': (160, 160),
'l': (600, 600),
},
}
# END CUSTOM CONFIGURATION OPTIONS
events.Environment.loaded(config)
return config
|
Dark-Hacker/flasky
|
refs/heads/master
|
app/api_1_0/users.py
|
104
|
from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
@api.route('/users/<int:id>')
def get_user(id):
user = User.query.get_or_404(id)
return jsonify(user.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
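# Hedged usage sketch (not part of the original module): exercising the first
# endpoint over HTTP with the `requests` library, assuming the API blueprint is
# registered under /api/v1.0 and the development server listens on port 5000.
#
#   import requests
#   resp = requests.get('http://localhost:5000/api/v1.0/users/1')
#   print(resp.json())   # the dict produced by User.to_json()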
|
DevangS/CoralNet
|
refs/heads/master
|
images/migrations/0004_big_changes_to_image_and_metadata_and_add_location_value_models.py
|
1
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Value2'
db.create_table('images_value2', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Source'])),
))
db.send_create_signal('images', ['Value2'])
# Adding model 'Value1'
db.create_table('images_value1', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Source'])),
))
db.send_create_signal('images', ['Value1'])
# Adding model 'Value3'
db.create_table('images_value3', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Source'])),
))
db.send_create_signal('images', ['Value3'])
# Adding model 'Value4'
db.create_table('images_value4', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Source'])),
))
db.send_create_signal('images', ['Value4'])
# Adding model 'Value5'
db.create_table('images_value5', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Source'])),
))
db.send_create_signal('images', ['Value5'])
# Deleting field 'Metadata.width'
db.delete_column('images_metadata', 'width')
# Deleting field 'Metadata.height'
db.delete_column('images_metadata', 'height')
# Adding field 'Metadata.photo_date'
db.add_column('images_metadata', 'photo_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'Metadata.camera'
db.add_column('images_metadata', 'camera', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True), keep_default=False)
# Adding field 'Metadata.strobes'
db.add_column('images_metadata', 'strobes', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True), keep_default=False)
# Adding field 'Metadata.value1'
db.add_column('images_metadata', 'value1', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Value1'], null=True, blank=True), keep_default=False)
# Adding field 'Metadata.value2'
db.add_column('images_metadata', 'value2', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Value2'], null=True, blank=True), keep_default=False)
# Adding field 'Metadata.value3'
db.add_column('images_metadata', 'value3', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Value3'], null=True, blank=True), keep_default=False)
# Adding field 'Metadata.value4'
db.add_column('images_metadata', 'value4', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Value4'], null=True, blank=True), keep_default=False)
# Adding field 'Metadata.value5'
db.add_column('images_metadata', 'value5', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['images.Value5'], null=True, blank=True), keep_default=False)
# Adding field 'Metadata.group2_percent'
db.add_column('images_metadata', 'group2_percent', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Metadata.group1_percent'
db.add_column('images_metadata', 'group1_percent', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Metadata.group7_percent'
db.add_column('images_metadata', 'group7_percent', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Metadata.group6_percent'
db.add_column('images_metadata', 'group6_percent', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Metadata.group4_percent'
db.add_column('images_metadata', 'group4_percent', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Metadata.group5_percent'
db.add_column('images_metadata', 'group5_percent', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Metadata.group3_percent'
db.add_column('images_metadata', 'group3_percent', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Changing field 'Metadata.pixel_cm_ratio'
db.alter_column('images_metadata', 'pixel_cm_ratio', self.gf('django.db.models.fields.CharField')(max_length=45, null=True))
# Changing field 'Metadata.description'
db.alter_column('images_metadata', 'description', self.gf('django.db.models.fields.TextField')(max_length=1000))
# Changing field 'Metadata.name'
db.alter_column('images_metadata', 'name', self.gf('django.db.models.fields.CharField')(max_length=200))
# Adding field 'Source.default_total_points'
db.add_column('images_source', 'default_total_points', self.gf('django.db.models.fields.IntegerField')(default=50), keep_default=False)
# Deleting field 'Image.camera'
db.delete_column('images_image', 'camera_id')
# Adding field 'Image.original_file'
db.add_column('images_image', 'original_file', self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100), keep_default=False)
# Adding field 'Image.original_width'
db.add_column('images_image', 'original_width', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Image.original_height'
db.add_column('images_image', 'original_height', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Image.upload_date'
db.add_column('images_image', 'upload_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.date(2000, 1, 1), blank=True), keep_default=False)
# Adding field 'Image.uploaded_by'
db.add_column('images_image', 'uploaded_by', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']), keep_default=False)
# Adding field 'Image.metadata'
db.add_column('images_image', 'metadata', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['images.Metadata']), keep_default=False)
def backwards(self, orm):
# Deleting model 'Value2'
db.delete_table('images_value2')
# Deleting model 'Value1'
db.delete_table('images_value1')
# Deleting model 'Value3'
db.delete_table('images_value3')
# Deleting model 'Value4'
db.delete_table('images_value4')
# Deleting model 'Value5'
db.delete_table('images_value5')
# Adding field 'Metadata.width'
db.add_column('images_metadata', 'width', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Metadata.height'
db.add_column('images_metadata', 'height', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Deleting field 'Metadata.photo_date'
db.delete_column('images_metadata', 'photo_date')
# Deleting field 'Metadata.camera'
db.delete_column('images_metadata', 'camera')
# Deleting field 'Metadata.strobes'
db.delete_column('images_metadata', 'strobes')
# Deleting field 'Metadata.value1'
db.delete_column('images_metadata', 'value1_id')
# Deleting field 'Metadata.value2'
db.delete_column('images_metadata', 'value2_id')
# Deleting field 'Metadata.value3'
db.delete_column('images_metadata', 'value3_id')
# Deleting field 'Metadata.value4'
db.delete_column('images_metadata', 'value4_id')
# Deleting field 'Metadata.value5'
db.delete_column('images_metadata', 'value5_id')
# Deleting field 'Metadata.group2_percent'
db.delete_column('images_metadata', 'group2_percent')
# Deleting field 'Metadata.group1_percent'
db.delete_column('images_metadata', 'group1_percent')
# Deleting field 'Metadata.group7_percent'
db.delete_column('images_metadata', 'group7_percent')
# Deleting field 'Metadata.group6_percent'
db.delete_column('images_metadata', 'group6_percent')
# Deleting field 'Metadata.group4_percent'
db.delete_column('images_metadata', 'group4_percent')
# Deleting field 'Metadata.group5_percent'
db.delete_column('images_metadata', 'group5_percent')
# Deleting field 'Metadata.group3_percent'
db.delete_column('images_metadata', 'group3_percent')
# Changing field 'Metadata.pixel_cm_ratio'
db.alter_column('images_metadata', 'pixel_cm_ratio', self.gf('django.db.models.fields.IntegerField')(default=0))
# Changing field 'Metadata.description'
db.alter_column('images_metadata', 'description', self.gf('django.db.models.fields.CharField')(max_length=45))
# Changing field 'Metadata.name'
db.alter_column('images_metadata', 'name', self.gf('django.db.models.fields.CharField')(max_length=45))
# Deleting field 'Source.default_total_points'
db.delete_column('images_source', 'default_total_points')
# Adding field 'Image.camera'
db.add_column('images_image', 'camera', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['images.Metadata']), keep_default=False)
# Deleting field 'Image.original_file'
db.delete_column('images_image', 'original_file')
# Deleting field 'Image.original_width'
db.delete_column('images_image', 'original_width')
# Deleting field 'Image.original_height'
db.delete_column('images_image', 'original_height')
# Deleting field 'Image.upload_date'
db.delete_column('images_image', 'upload_date')
# Deleting field 'Image.uploaded_by'
db.delete_column('images_image', 'uploaded_by_id')
# Deleting field 'Image.metadata'
db.delete_column('images_image', 'metadata_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'images.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Metadata']"}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'original_height': ('django.db.models.fields.IntegerField', [], {}),
'original_width': ('django.db.models.fields.IntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'total_points': ('django.db.models.fields.IntegerField', [], {}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'images.metadata': {
'Meta': {'object_name': 'Metadata'},
'camera': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'group1_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group2_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group3_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group4_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group5_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group6_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group7_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'photo_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'photographer': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'pixel_cm_ratio': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'strobes': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'value1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value1']", 'null': 'True', 'blank': 'True'}),
'value2': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value2']", 'null': 'True', 'blank': 'True'}),
'value3': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value3']", 'null': 'True', 'blank': 'True'}),
'value4': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value4']", 'null': 'True', 'blank': 'True'}),
'value5': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value5']", 'null': 'True', 'blank': 'True'}),
'water_quality': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'images.point': {
'Meta': {'object_name': 'Point'},
'annotation_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'column': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
'point_number': ('django.db.models.fields.IntegerField', [], {}),
'row': ('django.db.models.fields.IntegerField', [], {})
},
'images.source': {
'Meta': {'object_name': 'Source'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_total_points': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'v'", 'max_length': '1'})
},
'images.value1': {
'Meta': {'object_name': 'Value1'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value2': {
'Meta': {'object_name': 'Value2'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value3': {
'Meta': {'object_name': 'Value3'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value4': {
'Meta': {'object_name': 'Value4'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value5': {
'Meta': {'object_name': 'Value5'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
}
}
complete_apps = ['images']
|
sgraham/nope
|
refs/heads/master
|
tools/cygprofile/mergetraces.py
|
20
|
#!/usr/bin/python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Use: ../mergetraces.py `ls cyglog.* -Sr` > merged_cyglog
""""Merge multiple logs files from different processes into a single log.
Given two log files of execution traces, merge the traces into a single trace.
Merging will use timestamps (i.e. the first two columns of logged calls) to
create a single log that is an ordered trace of calls by both processes.
"""
import optparse
import string
import sys
def ParseLogLines(lines):
"""Parse log file lines.
Args:
lines: lines from log file produced by profiled run
Below is an example of a small log file:
5086e000-52e92000 r-xp 00000000 b3:02 51276 libchromeview.so
secs usecs pid:threadid func
START
1314897086 795828 3587:1074648168 0x509e105c
1314897086 795874 3587:1074648168 0x509e0eb4
1314897086 796326 3587:1074648168 0x509e0e3c
1314897086 796552 3587:1074648168 0x509e07bc
END
Returns:
    tuple consisting of 1) an ordered list of the logged calls, as an array of
fields, 2) the virtual start address of the library, used to compute the
offset of the symbol in the library and 3) the virtual end address
"""
call_lines = []
vm_start = 0
vm_end = 0
dash_index = lines[0].find ('-')
space_index = lines[0].find (' ')
vm_start = int (lines[0][:dash_index], 16)
vm_end = int (lines[0][dash_index+1:space_index], 16)
for line in lines[2:]:
line = line.strip()
# print hex (vm_start)
fields = line.split()
call_lines.append (fields)
return (call_lines, vm_start, vm_end)
def HasDuplicates(calls):
"""Makes sure that calls are only logged once.
Args:
calls: list of calls logged
Returns:
boolean indicating if calls has duplicate calls
"""
seen = set([])
for call in calls:
if call[3] in seen:
return True
seen.add(call[3])
return False
def CheckTimestamps(calls):
"""Prints warning to stderr if the call timestamps are not in order.
Args:
calls: list of calls logged
"""
index = 0
last_timestamp_secs = -1
last_timestamp_us = -1
while (index < len (calls)):
timestamp_secs = int (calls[index][0])
timestamp_us = int (calls[index][1])
timestamp = (timestamp_secs * 1000000) + timestamp_us
last_timestamp = (last_timestamp_secs * 1000000) + last_timestamp_us
if (timestamp < last_timestamp):
raise Exception("last_timestamp: " + str(last_timestamp_secs)
+ " " + str(last_timestamp_us) + " timestamp: "
+ str(timestamp_secs) + " " + str(timestamp_us) + "\n")
last_timestamp_secs = timestamp_secs
last_timestamp_us = timestamp_us
index = index + 1
def Convert (call_lines, startAddr, endAddr):
"""Converts the call addresses to static offsets and removes invalid calls.
Removes profiled calls not in shared library using start and end virtual
  addresses, converts strings to integer values, and converts virtual addresses to
  offsets in the shared library.
Returns:
list of calls as tuples (sec, usec, pid:tid, callee)
"""
converted_calls = []
call_addresses = set()
for fields in call_lines:
secs = int (fields[0])
usecs = int (fields[1])
callee = int (fields[3], 16)
# print ("callee: " + hex (callee) + " start: " + hex (startAddr) + " end: "
# + hex (endAddr))
if (callee >= startAddr and callee < endAddr
and (not callee in call_addresses)):
converted_calls.append((secs, usecs, fields[2], (callee - startAddr)))
call_addresses.add(callee)
return converted_calls
def Timestamp(trace_entry):
return int (trace_entry[0]) * 1000000 + int(trace_entry[1])
def AddTrace (tracemap, trace):
"""Adds a trace to the tracemap.
Adds entries in the trace to the tracemap. All new calls will be added to
the tracemap. If the calls already exist in the tracemap then they will be
replaced if they happened sooner in the new trace.
Args:
tracemap: the tracemap
trace: the trace
"""
for trace_entry in trace:
call = trace_entry[3]
if (not call in tracemap) or (
Timestamp(tracemap[call]) > Timestamp(trace_entry)):
tracemap[call] = trace_entry
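# Illustrative sketch (not part of the original tool): if tracemap[0x17305c]
# already holds an entry stamped 1314897086s/796000us and another trace logs the
# same callee at 1314897086s/795828us, the earlier-stamped entry replaces it, so
# the merged trace keeps the first time each function was ever reached.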
def GroupByProcessAndThreadId(input_trace):
"""Returns an array of traces grouped by pid and tid.
This is used to make the order of functions not depend on thread scheduling
which can be greatly impacted when profiling is done with cygprofile. As a
result each thread has its own contiguous segment of code (ordered by
timestamp) and processes also have their code isolated (i.e. not interleaved).
"""
def MakeTimestamp(sec, usec):
return sec * 1000000 + usec
def PidAndTidFromString(pid_and_tid):
strings = pid_and_tid.split(':')
return (int(strings[0]), int(strings[1]))
tid_to_pid_map = {}
pid_first_seen = {}
tid_first_seen = {}
for (sec, usec, pid_and_tid, _) in input_trace:
(pid, tid) = PidAndTidFromString(pid_and_tid)
# Make sure that thread IDs are unique since this is a property we rely on.
if tid_to_pid_map.setdefault(tid, pid) != pid:
raise Exception(
'Seen PIDs %d and %d for TID=%d. Thread-IDs must be unique' % (
tid_to_pid_map[tid], pid, tid))
if not pid in pid_first_seen:
pid_first_seen[pid] = MakeTimestamp(sec, usec)
if not tid in tid_first_seen:
tid_first_seen[tid] = MakeTimestamp(sec, usec)
def CompareEvents(event1, event2):
(sec1, usec1, pid_and_tid, _) = event1
(pid1, tid1) = PidAndTidFromString(pid_and_tid)
(sec2, usec2, pid_and_tid, _) = event2
(pid2, tid2) = PidAndTidFromString(pid_and_tid)
pid_cmp = cmp(pid_first_seen[pid1], pid_first_seen[pid2])
if pid_cmp != 0:
return pid_cmp
tid_cmp = cmp(tid_first_seen[tid1], tid_first_seen[tid2])
if tid_cmp != 0:
return tid_cmp
return cmp(MakeTimestamp(sec1, usec1), MakeTimestamp(sec2, usec2))
return sorted(input_trace, cmp=CompareEvents)
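# Illustrative sketch (not part of the original tool): given entries from pid 100
# (first seen at t=5) and pid 200 (first seen at t=1), CompareEvents orders every
# pid 200 entry before every pid 100 entry, then groups each process's entries by
# thread in first-seen order, and only sorts by timestamp within a thread.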
def main():
"""Merge two traces for code in specified library and write to stdout.
Merges the two traces and coverts the virtual addresses to the offsets in the
library. First line of merged trace has dummy virtual address of 0-ffffffff
so that symbolizing the addresses uses the addresses in the log, since the
addresses have already been converted to static offsets.
"""
parser = optparse.OptionParser('usage: %prog trace1 ... traceN')
(_, args) = parser.parse_args()
if len(args) <= 1:
parser.error('expected at least the following args: trace1 trace2')
step = 0
# Maps function addresses to their corresponding trace entry.
tracemap = dict()
for trace_file in args:
step += 1
sys.stderr.write(" " + str(step) + "/" + str(len(args)) +
": " + trace_file + ":\n")
trace_lines = map(string.rstrip, open(trace_file).readlines())
(trace_calls, trace_start, trace_end) = ParseLogLines(trace_lines)
CheckTimestamps(trace_calls)
sys.stderr.write("Len: " + str(len(trace_calls)) +
". Start: " + hex(trace_start) +
", end: " + hex(trace_end) + '\n')
trace_calls = Convert(trace_calls, trace_start, trace_end)
sys.stderr.write("Converted len: " + str(len(trace_calls)) + "\n")
AddTrace(tracemap, trace_calls)
sys.stderr.write("Merged len: " + str(len(tracemap)) + "\n")
# Extract the resulting trace from the tracemap
merged_trace = []
for call in tracemap:
merged_trace.append(tracemap[call])
merged_trace.sort(key=Timestamp)
grouped_trace = GroupByProcessAndThreadId(merged_trace)
print "0-ffffffff r-xp 00000000 xx:00 00000 ./"
print "secs\tusecs\tpid:threadid\tfunc"
for call in grouped_trace:
print (str(call[0]) + "\t" + str(call[1]) + "\t" + call[2] + "\t" +
hex(call[3]))
if __name__ == '__main__':
main()
|
DamCB/tyssue
|
refs/heads/master
|
tests/core/test_monolayer.py
|
2
|
from numpy.testing import assert_array_equal
import numpy as np
from tyssue.generation import extrude, three_faces_sheet
from tyssue import Monolayer, config, Sheet
from tyssue.core.monolayer import MonolayerWithLamina
def test_monolayer():
sheet = Sheet("test", *three_faces_sheet())
mono = Monolayer.from_flat_sheet("test", sheet, config.geometry.bulk_spec())
assert_array_equal(mono.apical_verts.values, np.arange(13))
assert_array_equal(mono.basal_verts.values, np.arange(13) + 13)
assert_array_equal(mono.apical_edges.values, np.arange(18))
assert_array_equal(mono.basal_edges.values, np.arange(18) + 18)
assert_array_equal(mono.lateral_edges.values, np.arange(72) + 36)
assert_array_equal(mono.apical_faces.values, np.arange(3))
assert_array_equal(mono.basal_faces.values, np.arange(3) + 3)
assert_array_equal(mono.lateral_faces.values, np.arange(18) + 6)
def test_monolayer_with_lamina():
sheet_dsets, _ = three_faces_sheet()
dsets = extrude(sheet_dsets, method="translation")
mono = MonolayerWithLamina("test", dsets, config.geometry.bulk_spec())
assert mono.lamina_edges.shape == (3,)
def test_copy():
datasets, specs = three_faces_sheet()
extruded = extrude(datasets, method="translation")
mono = Monolayer("test", extruded, config.geometry.bulk_spec())
assert mono.Nc == 3
assert mono.Nf == 24
assert mono.Ne == 108
assert mono.Nv == 26
mono2 = mono.copy()
assert mono2.Nc == 3
assert isinstance(mono2, Monolayer)
|
vnsofthe/odoo-dev
|
refs/heads/master
|
addons/only_single_user/__openerp__.py
|
1
|
# -*- coding: utf-8 -*-
#
{
'name': 'only_single_user',
'version': '0.1',
'category': 'web',
'sequence': 23,
'summary': 'Only single user login',
'description': """
Only single user login: when a user logs in, any other session for that user id is logged out.
""",
'author': 'VnSoft',
'website': 'http://blog.csdn.net/vnsoft',
# 'images': ['images/Sale_order_line_to_invoice.jpeg','images/sale_order.jpeg','images/sales_analysis.jpeg'],
'depends': ['base', 'web',],
'data': ["data.xml"],
"qweb":[],
'demo': [],
'test': [],
'js': [ ],
'installable': True,
'auto_install': False,
'application': False,
}
|
eeshangarg/oh-mainline
|
refs/heads/master
|
vendor/packages/gdata/tests/gdata_tests/health/service_test.py
|
127
|
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.eric@google.com (Eric Bidelman)'
import getpass
import unittest
from gdata import test_data
import gdata.health
import gdata.health.service
username = ''
password = ''
class HealthQueryProfileListTest(unittest.TestCase):
def setUp(self):
self.health = gdata.health.service.HealthService()
self.health.ClientLogin(username, password, source='Health Client Unit Tests')
self.profile_list_feed = self.health.GetProfileListFeed()
def testGetProfileListFeed(self):
self.assert_(isinstance(self.profile_list_feed,
gdata.health.ProfileListFeed))
self.assertEqual(self.profile_list_feed.id.text,
'https://www.google.com/health/feeds/profile/list')
first_entry = self.profile_list_feed.entry[0]
self.assert_(isinstance(first_entry, gdata.health.ProfileListEntry))
self.assert_(first_entry.GetProfileId() is not None)
self.assert_(first_entry.GetProfileName() is not None)
query = gdata.health.service.HealthProfileListQuery()
profile_list = self.health.GetProfileListFeed(query)
self.assertEqual(first_entry.GetProfileId(),
profile_list.entry[0].GetProfileId())
self.assertEqual(profile_list.id.text,
'https://www.google.com/health/feeds/profile/list')
class H9QueryProfileListTest(unittest.TestCase):
def setUp(self):
self.h9 = gdata.health.service.HealthService(use_h9_sandbox=True)
self.h9.ClientLogin(username, password, source='H9 Client Unit Tests')
self.profile_list_feed = self.h9.GetProfileListFeed()
def testGetProfileListFeed(self):
self.assert_(isinstance(self.profile_list_feed,
gdata.health.ProfileListFeed))
self.assertEqual(self.profile_list_feed.id.text,
'https://www.google.com/h9/feeds/profile/list')
first_entry = self.profile_list_feed.entry[0]
self.assert_(isinstance(first_entry, gdata.health.ProfileListEntry))
self.assert_(first_entry.GetProfileId() is not None)
self.assert_(first_entry.GetProfileName() is not None)
query = gdata.health.service.HealthProfileListQuery()
profile_list = self.h9.GetProfileListFeed(query)
self.assertEqual(first_entry.GetProfileId(),
profile_list.entry[0].GetProfileId())
self.assertEqual(profile_list.id.text,
'https://www.google.com/h9/feeds/profile/list')
class HealthQueryProfileTest(unittest.TestCase):
def setUp(self):
self.health = gdata.health.service.HealthService()
self.health.ClientLogin(username, password, source='Health Client Unit Tests')
self.profile_list_feed = self.health.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testGetProfileFeed(self):
feed = self.health.GetProfileFeed(profile_id=self.profile_id)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(isinstance(feed.entry[0].ccr, gdata.health.Ccr))
def testGetProfileFeedByQuery(self):
query = gdata.health.service.HealthProfileQuery(
projection='ui', profile_id=self.profile_id)
feed = self.health.GetProfileFeed(query=query)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(feed.entry[0].ccr is not None)
def testGetProfileDigestFeed(self):
query = gdata.health.service.HealthProfileQuery(
projection='ui', profile_id=self.profile_id,
params={'digest': 'true'})
feed = self.health.GetProfileFeed(query=query)
self.assertEqual(len(feed.entry), 1)
def testGetMedicationsAndConditions(self):
query = gdata.health.service.HealthProfileQuery(
projection='ui', profile_id=self.profile_id,
params={'digest': 'true'}, categories=['medication|condition'])
feed = self.health.GetProfileFeed(query=query)
self.assertEqual(len(feed.entry), 1)
if feed.entry[0].ccr.GetMedications() is not None:
self.assert_(feed.entry[0].ccr.GetMedications()[0] is not None)
self.assert_(feed.entry[0].ccr.GetConditions()[0] is not None)
self.assert_(feed.entry[0].ccr.GetAllergies() is None)
self.assert_(feed.entry[0].ccr.GetAlerts() is None)
self.assert_(feed.entry[0].ccr.GetResults() is None)
class H9QueryProfileTest(unittest.TestCase):
def setUp(self):
self.h9 = gdata.health.service.HealthService(use_h9_sandbox=True)
self.h9.ClientLogin(username, password, source='H9 Client Unit Tests')
self.profile_list_feed = self.h9.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testGetProfileFeed(self):
feed = self.h9.GetProfileFeed(profile_id=self.profile_id)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(feed.entry[0].ccr is not None)
def testGetProfileFeedByQuery(self):
query = gdata.health.service.HealthProfileQuery(
service='h9', projection='ui', profile_id=self.profile_id)
feed = self.h9.GetProfileFeed(query=query)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(feed.entry[0].ccr is not None)
class HealthNoticeTest(unittest.TestCase):
def setUp(self):
self.health = gdata.health.service.HealthService()
self.health.ClientLogin(username, password, source='Health Client Unit Tests')
self.profile_list_feed = self.health.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testSendNotice(self):
subject_line = 'subject line'
body = 'Notice <b>body</b>.'
ccr_xml = test_data.HEALTH_CCR_NOTICE_PAYLOAD
created_entry = self.health.SendNotice(subject_line,
body,
ccr=ccr_xml,
profile_id=self.profile_id)
self.assertEqual(created_entry.title.text, subject_line)
self.assertEqual(created_entry.content.text, body)
self.assertEqual(created_entry.content.type, 'html')
problem = created_entry.ccr.GetProblems()[0]
problem_desc = problem.FindChildren('Description')[0]
name = problem_desc.FindChildren('Text')[0]
self.assertEqual(name.text, 'Aortic valve disorders')
class H9NoticeTest(unittest.TestCase):
def setUp(self):
self.h9 = gdata.health.service.HealthService(use_h9_sandbox=True)
self.h9.ClientLogin(username, password, source='H9 Client Unit Tests')
self.profile_list_feed = self.h9.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testSendNotice(self):
subject_line = 'subject line'
body = 'Notice <b>body</b>.'
ccr_xml = test_data.HEALTH_CCR_NOTICE_PAYLOAD
created_entry = self.h9.SendNotice(subject_line, body, ccr=ccr_xml,
profile_id=self.profile_id)
self.assertEqual(created_entry.title.text, subject_line)
self.assertEqual(created_entry.content.text, body)
self.assertEqual(created_entry.content.type, 'html')
problem = created_entry.ccr.GetProblems()[0]
problem_desc = problem.FindChildren('Description')[0]
name = problem_desc.FindChildren('Text')[0]
self.assertEqual(name.text, 'Aortic valve disorders')
if __name__ == '__main__':
print ('Health API Tests\nNOTE: Please run these tests only with a test '
'account. The tests may delete or update your data.')
username = raw_input('Please enter your username: ')
password = getpass.getpass()
unittest.main()
|
fmoreyra/ReservaLibrosISOO
|
refs/heads/master
|
login/models.py
|
1
|
import uuid
from datetime import datetime, date
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from login.choices import *
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
legajo = models.CharField(
max_length=255,
unique=True,
verbose_name=_('Legajo'),
blank=True)
apellido = models.CharField(
max_length=255,
verbose_name=_('Apellido'))
nombre = models.CharField(
max_length=255,
verbose_name=_('Nombre'))
documento = models.CharField(
max_length=255,
verbose_name=_('Número de documento'),
unique=True,
blank=True)
fecha_nacimiento = models.DateField(
verbose_name=_('Fecha de Nacimiento'),
blank=True,
null=True)
    carrera = models.CharField(
max_length=255,
choices=CARRERA,
default=CARRERA[0][0],
verbose_name=_("Carrera")
)
@property
def nombre_completo(self):
return "{0}, {1}".format(
self.apellido.upper(),
self.nombre)
@property
def edad(self):
delta = (date.today() - self.fecha_nacimiento)
return int(delta.days / 365.2425)
def __str__(self):
return "{0}, {1} {2}, {3}".format(
self.legajo,
self.nombre,
self.apellido,
self.carrera)
    def save(self, *args, **kwargs):
        self.validate_unique()
        super(Profile, self).save(*args, **kwargs)
class Meta:
verbose_name = _("Perfil")
verbose_name_plural = _("Perfiles")
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
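# Illustrative sketch (assumption, not part of the original app): thanks to the
# two post_save receivers above, creating a User (e.g.
# User.objects.create(username='jdoe')) also creates its related Profile, and
# every later user.save() re-saves user.profile as well.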
|
Vitallium/qtwebkit
|
refs/heads/phantomjs
|
Source/ThirdParty/gtest/test/gtest_xml_test_utils.py
|
306
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
An exception is any attribute named "time", which needs only be
convertible to a floating-point number.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
    actual_attributes = actual_node.attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
"attribute numbers differ in element " + actual_node.tagName)
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
"expected attribute %s not found in element %s" %
(expected_attr.name, actual_node.tagName))
self.assertEquals(expected_attr.value, actual_attr.value,
" values of attribute %s in element %s differ" %
(expected_attr.name, actual_node.tagName))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
"number of child elements differ in element " + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
"testsuites": "name",
"testsuite": "name",
"testcase": "name",
"failure": "message",
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
"Encountered unknown element <%s>" % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if "detail" not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children["detail"] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children["detail"].nodeValue += child.nodeValue
else:
self.fail("Encountered unexpected node type %d" % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The line number reported in the first line of the "message"
attribute of <failure> elements is replaced with a single asterisk.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName in ("testsuites", "testsuite", "testcase"):
time = element.getAttributeNode("time")
time.value = re.sub(r"^\d+(\.\d+)?$", "*", time.value)
elif element.tagName == "failure":
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Removes the source line number.
cdata = re.sub(r"^.*[/\\](.*:)\d+\n", "\\1*\n", child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r"\nStack trace:\n(.|\n)*",
"", cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
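# Illustrative sketch (not part of the original utilities): NormalizeXml turns a
# time="0.035" attribute on <testcase> into time="*", and a failure CDATA line
# such as "/path/to/foo_test.cc:123" into "foo_test.cc:*", so expected and
# actual XML can be compared without run-specific details.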
|
iwaseyusuke/ryu
|
refs/heads/master
|
ryu/lib/packet/zebra.py
|
4
|
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Zebra protocol parser/serializer
Zebra Protocol is used to communicate with the zebra daemon.
"""
import abc
import socket
import struct
import logging
from distutils.version import LooseVersion
import netaddr
import six
from ryu import flags as cfg_flags # For loading 'zapi' option definition
from ryu.cfg import CONF
from ryu.lib import addrconv
from ryu.lib import ip
from ryu.lib import stringify
from ryu.lib import type_desc
from . import packet_base
from . import bgp
from . import safi as packet_safi
LOG = logging.getLogger(__name__)
# Default Zebra protocol version
_DEFAULT_VERSION = 3
_DEFAULT_FRR_VERSION = 4
_FRR_VERSION_2_0 = LooseVersion('2.0')
_FRR_VERSION_3_0 = LooseVersion('3.0')
# Constants in quagga/lib/zebra.h
# Default Zebra TCP port
ZEBRA_PORT = 2600
# Zebra message types
ZEBRA_INTERFACE_ADD = 1
ZEBRA_INTERFACE_DELETE = 2
ZEBRA_INTERFACE_ADDRESS_ADD = 3
ZEBRA_INTERFACE_ADDRESS_DELETE = 4
ZEBRA_INTERFACE_UP = 5
ZEBRA_INTERFACE_DOWN = 6
ZEBRA_IPV4_ROUTE_ADD = 7
ZEBRA_IPV4_ROUTE_DELETE = 8
ZEBRA_IPV6_ROUTE_ADD = 9
ZEBRA_IPV6_ROUTE_DELETE = 10
ZEBRA_REDISTRIBUTE_ADD = 11
ZEBRA_REDISTRIBUTE_DELETE = 12
ZEBRA_REDISTRIBUTE_DEFAULT_ADD = 13
ZEBRA_REDISTRIBUTE_DEFAULT_DELETE = 14
ZEBRA_IPV4_NEXTHOP_LOOKUP = 15
ZEBRA_IPV6_NEXTHOP_LOOKUP = 16
ZEBRA_IPV4_IMPORT_LOOKUP = 17
ZEBRA_IPV6_IMPORT_LOOKUP = 18
ZEBRA_INTERFACE_RENAME = 19
ZEBRA_ROUTER_ID_ADD = 20
ZEBRA_ROUTER_ID_DELETE = 21
ZEBRA_ROUTER_ID_UPDATE = 22
ZEBRA_HELLO = 23
ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB = 24
ZEBRA_VRF_UNREGISTER = 25
ZEBRA_INTERFACE_LINK_PARAMS = 26
ZEBRA_NEXTHOP_REGISTER = 27
ZEBRA_NEXTHOP_UNREGISTER = 28
ZEBRA_NEXTHOP_UPDATE = 29
ZEBRA_MESSAGE_MAX = 30
# Zebra message types on FRRouting
FRR_ZEBRA_INTERFACE_ADD = 0
FRR_ZEBRA_INTERFACE_DELETE = 1
FRR_ZEBRA_INTERFACE_ADDRESS_ADD = 2
FRR_ZEBRA_INTERFACE_ADDRESS_DELETE = 3
FRR_ZEBRA_INTERFACE_UP = 4
FRR_ZEBRA_INTERFACE_DOWN = 5
FRR_ZEBRA_IPV4_ROUTE_ADD = 6
FRR_ZEBRA_IPV4_ROUTE_DELETE = 7
FRR_ZEBRA_IPV6_ROUTE_ADD = 8
FRR_ZEBRA_IPV6_ROUTE_DELETE = 9
FRR_ZEBRA_REDISTRIBUTE_ADD = 10
FRR_ZEBRA_REDISTRIBUTE_DELETE = 11
FRR_ZEBRA_REDISTRIBUTE_DEFAULT_ADD = 12
FRR_ZEBRA_REDISTRIBUTE_DEFAULT_DELETE = 13
FRR_ZEBRA_ROUTER_ID_ADD = 14
FRR_ZEBRA_ROUTER_ID_DELETE = 15
FRR_ZEBRA_ROUTER_ID_UPDATE = 16
FRR_ZEBRA_HELLO = 17
FRR_ZEBRA_NEXTHOP_REGISTER = 18
FRR_ZEBRA_NEXTHOP_UNREGISTER = 19
FRR_ZEBRA_NEXTHOP_UPDATE = 20
FRR_ZEBRA_INTERFACE_NBR_ADDRESS_ADD = 21
FRR_ZEBRA_INTERFACE_NBR_ADDRESS_DELETE = 22
FRR_ZEBRA_INTERFACE_BFD_DEST_UPDATE = 23
FRR_ZEBRA_IMPORT_ROUTE_REGISTER = 24
FRR_ZEBRA_IMPORT_ROUTE_UNREGISTER = 25
FRR_ZEBRA_IMPORT_CHECK_UPDATE = 26
FRR_ZEBRA_IPV4_ROUTE_IPV6_NEXTHOP_ADD = 27
FRR_ZEBRA_BFD_DEST_REGISTER = 28
FRR_ZEBRA_BFD_DEST_DEREGISTER = 29
FRR_ZEBRA_BFD_DEST_UPDATE = 30
FRR_ZEBRA_BFD_DEST_REPLAY = 31
FRR_ZEBRA_REDISTRIBUTE_IPV4_ADD = 32
FRR_ZEBRA_REDISTRIBUTE_IPV4_DEL = 33
FRR_ZEBRA_REDISTRIBUTE_IPV6_ADD = 34
FRR_ZEBRA_REDISTRIBUTE_IPV6_DEL = 35
FRR_ZEBRA_VRF_UNREGISTER = 36
FRR_ZEBRA_VRF_ADD = 37
FRR_ZEBRA_VRF_DELETE = 38
FRR_ZEBRA_INTERFACE_VRF_UPDATE = 39
FRR_ZEBRA_BFD_CLIENT_REGISTER = 40
FRR_ZEBRA_INTERFACE_ENABLE_RADV = 41
FRR_ZEBRA_INTERFACE_DISABLE_RADV = 42
FRR_ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB = 43
FRR_ZEBRA_INTERFACE_LINK_PARAMS = 44
FRR_ZEBRA_MPLS_LABELS_ADD = 45
FRR_ZEBRA_MPLS_LABELS_DELETE = 46
FRR_ZEBRA_IPV4_NEXTHOP_ADD = 47
FRR_ZEBRA_IPV4_NEXTHOP_DELETE = 48
FRR_ZEBRA_IPV6_NEXTHOP_ADD = 49
FRR_ZEBRA_IPV6_NEXTHOP_DELETE = 50
# Zebra route types
ZEBRA_ROUTE_SYSTEM = 0
ZEBRA_ROUTE_KERNEL = 1
ZEBRA_ROUTE_CONNECT = 2
ZEBRA_ROUTE_STATIC = 3
ZEBRA_ROUTE_RIP = 4
ZEBRA_ROUTE_RIPNG = 5
ZEBRA_ROUTE_OSPF = 6
ZEBRA_ROUTE_OSPF6 = 7
ZEBRA_ROUTE_ISIS = 8
ZEBRA_ROUTE_BGP = 9
ZEBRA_ROUTE_PIM = 10
ZEBRA_ROUTE_HSLS = 11
ZEBRA_ROUTE_OLSR = 12
ZEBRA_ROUTE_BABEL = 13
ZEBRA_ROUTE_MAX = 14
# Zebra route types on FRRouting
FRR_ZEBRA_ROUTE_SYSTEM = 0
FRR_ZEBRA_ROUTE_KERNEL = 1
FRR_ZEBRA_ROUTE_CONNECT = 2
FRR_ZEBRA_ROUTE_STATIC = 3
FRR_ZEBRA_ROUTE_RIP = 4
FRR_ZEBRA_ROUTE_RIPNG = 5
FRR_ZEBRA_ROUTE_OSPF = 6
FRR_ZEBRA_ROUTE_OSPF6 = 7
FRR_ZEBRA_ROUTE_ISIS = 8
FRR_ZEBRA_ROUTE_BGP = 9
FRR_ZEBRA_ROUTE_PIM = 10
FRR_ZEBRA_ROUTE_HSLS = 11
FRR_ZEBRA_ROUTE_OLSR = 12
FRR_ZEBRA_ROUTE_TABLE = 13
FRR_ZEBRA_ROUTE_LDP = 14
FRR_ZEBRA_ROUTE_VNC = 15
FRR_ZEBRA_ROUTE_VNC_DIRECT = 16
FRR_ZEBRA_ROUTE_VNC_DIRECT_RH = 17
FRR_ZEBRA_ROUTE_BGP_DIRECT = 18
FRR_ZEBRA_ROUTE_BGP_DIRECT_EXT = 19
FRR_ZEBRA_ROUTE_ALL = 20
FRR_ZEBRA_ROUTE_MAX = 21
# Zebra message flags
ZEBRA_FLAG_INTERNAL = 0x01
ZEBRA_FLAG_SELFROUTE = 0x02
ZEBRA_FLAG_BLACKHOLE = 0x04
ZEBRA_FLAG_IBGP = 0x08
ZEBRA_FLAG_SELECTED = 0x10
ZEBRA_FLAG_FIB_OVERRIDE = 0x20
ZEBRA_FLAG_STATIC = 0x40
ZEBRA_FLAG_REJECT = 0x80
# Zebra message flags on FRRouting
FRR_ZEBRA_FLAG_INTERNAL = 0x01
FRR_ZEBRA_FLAG_SELFROUTE = 0x02
FRR_ZEBRA_FLAG_BLACKHOLE = 0x04
FRR_ZEBRA_FLAG_IBGP = 0x08
FRR_ZEBRA_FLAG_SELECTED = 0x10
FRR_ZEBRA_FLAG_STATIC = 0x40
FRR_ZEBRA_FLAG_REJECT = 0x80
FRR_ZEBRA_FLAG_SCOPE_LINK = 0x100
FRR_ZEBRA_FLAG_FIB_OVERRIDE = 0x200
# Zebra nexthop flags
ZEBRA_NEXTHOP_IFINDEX = 1
ZEBRA_NEXTHOP_IFNAME = 2
ZEBRA_NEXTHOP_IPV4 = 3
ZEBRA_NEXTHOP_IPV4_IFINDEX = 4
ZEBRA_NEXTHOP_IPV4_IFNAME = 5
ZEBRA_NEXTHOP_IPV6 = 6
ZEBRA_NEXTHOP_IPV6_IFINDEX = 7
ZEBRA_NEXTHOP_IPV6_IFNAME = 8
ZEBRA_NEXTHOP_BLACKHOLE = 9
# Zebra nexthop flags on FRRouting
FRR_ZEBRA_NEXTHOP_IFINDEX = 1
FRR_ZEBRA_NEXTHOP_IPV4 = 2
FRR_ZEBRA_NEXTHOP_IPV4_IFINDEX = 3
FRR_ZEBRA_NEXTHOP_IPV6 = 4
FRR_ZEBRA_NEXTHOP_IPV6_IFINDEX = 5
FRR_ZEBRA_NEXTHOP_BLACKHOLE = 6
# Constants in quagga/lib/zclient.h
# Zebra API message flags
ZAPI_MESSAGE_NEXTHOP = 0x01
ZAPI_MESSAGE_IFINDEX = 0x02
ZAPI_MESSAGE_DISTANCE = 0x04
ZAPI_MESSAGE_METRIC = 0x08
ZAPI_MESSAGE_MTU = 0x10
ZAPI_MESSAGE_TAG = 0x20
# Zebra API message flags on FRRouting.
# Note: Constants for TAG/MTU are inverted from the Quagga version.
FRR_ZAPI_MESSAGE_NEXTHOP = 0x01
FRR_ZAPI_MESSAGE_IFINDEX = 0x02
FRR_ZAPI_MESSAGE_DISTANCE = 0x04
FRR_ZAPI_MESSAGE_METRIC = 0x08
FRR_ZAPI_MESSAGE_TAG = 0x10
FRR_ZAPI_MESSAGE_MTU = 0x20
FRR_ZAPI_MESSAGE_SRCPFX = 0x40
FRR_ZAPI_MESSAGE_LABEL = 0x80
# Constants in quagga/lib/if.h
# Interface name length
# Linux defines this value in /usr/include/linux/if.h.
# #define IFNAMSIZ 16
# FreeBSD defines this value in /usr/include/net/if.h.
# #define IFNAMSIZ 16
INTERFACE_NAMSIZE = 20
INTERFACE_HWADDR_MAX = 20
# Zebra internal interface status
ZEBRA_INTERFACE_ACTIVE = 1 << 0
ZEBRA_INTERFACE_SUB = 1 << 1
ZEBRA_INTERFACE_LINKDETECTION = 1 << 2
# The following are extensions on FRRouting
ZEBRA_INTERFACE_VRF_LOOPBACK = 1 << 3
# Zebra interface connected address flags
ZEBRA_IFA_SECONDARY = 1 << 0
ZEBRA_IFA_PEER = 1 << 1
ZEBRA_IFA_UNNUMBERED = 1 << 2
# Zebra link layer types
ZEBRA_LLT_UNKNOWN = 0
ZEBRA_LLT_ETHER = 1
ZEBRA_LLT_EETHER = 2
ZEBRA_LLT_AX25 = 3
ZEBRA_LLT_PRONET = 4
ZEBRA_LLT_IEEE802 = 5
ZEBRA_LLT_ARCNET = 6
ZEBRA_LLT_APPLETLK = 7
ZEBRA_LLT_DLCI = 8
ZEBRA_LLT_ATM = 9
ZEBRA_LLT_METRICOM = 10
ZEBRA_LLT_IEEE1394 = 11
ZEBRA_LLT_EUI64 = 12
ZEBRA_LLT_INFINIBAND = 13
ZEBRA_LLT_SLIP = 14
ZEBRA_LLT_CSLIP = 15
ZEBRA_LLT_SLIP6 = 16
ZEBRA_LLT_CSLIP6 = 17
ZEBRA_LLT_RSRVD = 18
ZEBRA_LLT_ADAPT = 19
ZEBRA_LLT_ROSE = 20
ZEBRA_LLT_X25 = 21
ZEBRA_LLT_PPP = 22
ZEBRA_LLT_CHDLC = 23
ZEBRA_LLT_LAPB = 24
ZEBRA_LLT_RAWHDLC = 25
ZEBRA_LLT_IPIP = 26
ZEBRA_LLT_IPIP6 = 27
ZEBRA_LLT_FRAD = 28
ZEBRA_LLT_SKIP = 29
ZEBRA_LLT_LOOPBACK = 30
ZEBRA_LLT_LOCALTLK = 31
ZEBRA_LLT_FDDI = 32
ZEBRA_LLT_SIT = 33
ZEBRA_LLT_IPDDP = 34
ZEBRA_LLT_IPGRE = 35
ZEBRA_LLT_IP6GRE = 36
ZEBRA_LLT_PIMREG = 37
ZEBRA_LLT_HIPPI = 38
ZEBRA_LLT_ECONET = 39
ZEBRA_LLT_IRDA = 40
ZEBRA_LLT_FCPP = 41
ZEBRA_LLT_FCAL = 42
ZEBRA_LLT_FCPL = 43
ZEBRA_LLT_FCFABRIC = 44
ZEBRA_LLT_IEEE802_TR = 45
ZEBRA_LLT_IEEE80211 = 46
ZEBRA_LLT_IEEE80211_RADIOTAP = 47
ZEBRA_LLT_IEEE802154 = 48
ZEBRA_LLT_IEEE802154_PHY = 49
# Link Parameters Status
LP_UNSET = 0x0000
LP_TE = 0x0001
LP_MAX_BW = 0x0002
LP_MAX_RSV_BW = 0x0004
LP_UNRSV_BW = 0x0008
LP_ADM_GRP = 0x0010
LP_RMT_AS = 0x0020
LP_DELAY = 0x0040
LP_MM_DELAY = 0x0080
LP_DELAY_VAR = 0x0100
LP_PKT_LOSS = 0x0200
LP_RES_BW = 0x0400
LP_AVA_BW = 0x0800
LP_USE_BW = 0x1000
LP_TE_METRIC = 0x2000
# "non-official" architectural constants
MAX_CLASS_TYPE = 8
# Constants in frr/zebra/zebra_ptm.h
# Interface PTM Enable configuration
ZEBRA_IF_PTM_ENABLE_OFF = 0
ZEBRA_IF_PTM_ENABLE_ON = 1
ZEBRA_IF_PTM_ENABLE_UNSPEC = 2
# PTM status
ZEBRA_PTM_STATUS_DOWN = 0
ZEBRA_PTM_STATUS_UP = 1
ZEBRA_PTM_STATUS_UNKNOWN = 2
# Constants in frr/lib/bfd.h
# BFD status
BFD_STATUS_UNKNOWN = 1 << 0
BFD_STATUS_DOWN = 1 << 1
BFD_STATUS_UP = 1 << 2
# Constants in frr/lib/vrf.h
# VRF name length
VRF_NAMSIZ = 36
# Constants in frr/lib/mpls.h
# Reserved MPLS label values
MPLS_V4_EXP_NULL_LABEL = 0
MPLS_RA_LABEL = 1
MPLS_V6_EXP_NULL_LABEL = 2
MPLS_IMP_NULL_LABEL = 3
MPLS_ENTROPY_LABEL_INDICATOR = 7
MPLS_GAL_LABEL = 13
MPLS_OAM_ALERT_LABEL = 14
MPLS_EXTENSION_LABEL = 15
MPLS_MIN_RESERVED_LABEL = 0
MPLS_MAX_RESERVED_LABEL = 15
MPLS_MIN_UNRESERVED_LABEL = 16
MPLS_MAX_UNRESERVED_LABEL = 1048575
# Utility functions/classes
IPv4Prefix = bgp.IPAddrPrefix
IPv6Prefix = bgp.IP6AddrPrefix
def _parse_ip_prefix(family, buf):
if family == socket.AF_INET:
prefix, rest = bgp.IPAddrPrefix.parser(buf)
elif family == socket.AF_INET6:
prefix, rest = IPv6Prefix.parser(buf)
else:
raise struct.error('Unsupported family: %d' % family)
return prefix.prefix, rest
def _serialize_ip_prefix(prefix):
if ip.valid_ipv4(prefix):
prefix_addr, prefix_num = prefix.split('/')
return bgp.IPAddrPrefix(int(prefix_num), prefix_addr).serialize()
elif ip.valid_ipv6(prefix):
prefix_addr, prefix_num = prefix.split('/')
return IPv6Prefix(int(prefix_num), prefix_addr).serialize()
else:
raise ValueError('Invalid prefix: %s' % prefix)
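# Illustrative sketch (assumption, not in the original module): the two helpers
# above are inverses, e.g. _parse_ip_prefix(socket.AF_INET,
# _serialize_ip_prefix('10.0.0.0/8') + rest) should give back ('10.0.0.0/8', rest),
# with the prefix encoded by bgp.IPAddrPrefix as a prefix-length byte followed by
# the significant address bytes.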
# Family and Zebra Prefix format:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 prefix (4 bytes or 16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Prefix len |
# +-+-+-+-+-+-+-+-+
_ZEBRA_FAMILY_FMT = '!B' # family
_ZEBRA_FAMILY_SIZE = struct.calcsize(_ZEBRA_FAMILY_FMT)
_ZEBRA_IPV4_PREFIX_FMT = '!4sB' # prefix, prefix_len
_ZEBRA_IPV6_PREFIX_FMT = '!16sB'
_ZEBRA_IPV4_PREFIX_SIZE = struct.calcsize(_ZEBRA_IPV4_PREFIX_FMT)
_ZEBRA_IPV6_PREFIX_SIZE = struct.calcsize(_ZEBRA_IPV6_PREFIX_FMT)
_ZEBRA_FAMILY_IPV4_PREFIX_FMT = '!B4sB' # family, prefix, prefix_len
_ZEBRA_FAMILY_IPV6_PREFIX_FMT = '!B16sB' # family, prefix, prefix_len
def _parse_zebra_family_prefix(buf):
"""
Parses family and prefix in Zebra format.
"""
(family,) = struct.unpack_from(_ZEBRA_FAMILY_FMT, buf)
rest = buf[_ZEBRA_FAMILY_SIZE:]
if socket.AF_INET == family:
(prefix, p_len) = struct.unpack_from(_ZEBRA_IPV4_PREFIX_FMT, rest)
prefix = '%s/%d' % (addrconv.ipv4.bin_to_text(prefix), p_len)
rest = rest[_ZEBRA_IPV4_PREFIX_SIZE:]
elif socket.AF_INET6 == family:
(prefix, p_len) = struct.unpack_from(_ZEBRA_IPV6_PREFIX_FMT, rest)
prefix = '%s/%d' % (addrconv.ipv6.bin_to_text(prefix), p_len)
rest = rest[_ZEBRA_IPV6_PREFIX_SIZE:]
else:
raise struct.error('Unsupported family: %d' % family)
return family, prefix, rest
def _serialize_zebra_family_prefix(prefix):
"""
Serializes family and prefix in Zebra format.
"""
if ip.valid_ipv4(prefix):
family = socket.AF_INET # fixup
prefix_addr, prefix_num = prefix.split('/')
return family, struct.pack(
_ZEBRA_FAMILY_IPV4_PREFIX_FMT,
family,
addrconv.ipv4.text_to_bin(prefix_addr),
int(prefix_num))
elif ip.valid_ipv6(prefix):
family = socket.AF_INET6 # fixup
prefix_addr, prefix_num = prefix.split('/')
return family, struct.pack(
_ZEBRA_FAMILY_IPV6_PREFIX_FMT,
family,
addrconv.ipv6.text_to_bin(prefix_addr),
int(prefix_num))
raise ValueError('Invalid prefix: %s' % prefix)
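# Illustrative sketch (assumption, not in the original module): for '192.0.2.0/24'
# the serialized form is struct.pack('!B4sB', socket.AF_INET,
# b'\xc0\x00\x02\x00', 24), i.e. family, the 4-byte address and the prefix
# length, matching the "Family and Zebra Prefix format" diagram above.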
def _is_frr_version_ge(compared_version):
return CONF['zapi'].frr_version >= compared_version
class InterfaceLinkParams(stringify.StringifyMixin):
"""
Interface Link Parameters class for if_link_params structure.
"""
# Interface Link Parameters structure:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Status of Link Parameters |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Traffic Engineering metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (float) Maximum Bandwidth |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (float) Maximum Reservable Bandwidth |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Number of Unreserved Bandwidth Classes (max is MAX_CLASS_TYPE)|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (float) Unreserved Bandwidth per Class Type |
# | ... repeats Number of Unreserved Bandwidth Classes times |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Administrative group |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Remote AS number |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Remote IP address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Link Average Delay |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Link Min Delay |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Link Max Delay |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Link Delay Variation |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (float) Link Packet Loss |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (float) Residual Bandwidth |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (float) Available Bandwidth |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (float) Utilized Bandwidth |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# lp_status, te_metric, max_bw, max_reserved_bw, bw_cls_num
_HEADER_FMT = '!IIffI'
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_REPEATED_FMT = '!f'
REPEATED_SIZE = struct.calcsize(_REPEATED_FMT)
# admin_group, remote_as, remote_ip,
# average_delay, min_delay, max_delay, delay_var,
# pkt_loss, residual_bw, average_bw, utilized_bw
_FOOTER_FMT = '!II4sIIIIffff'
FOOTER_SIZE = struct.calcsize(_FOOTER_FMT)
def __init__(self, lp_status, te_metric, max_bw, max_reserved_bw,
unreserved_bw, admin_group, remote_as, remote_ip,
average_delay, min_delay, max_delay, delay_var, pkt_loss,
residual_bw, average_bw, utilized_bw):
super(InterfaceLinkParams, self).__init__()
self.lp_status = lp_status
self.te_metric = te_metric
self.max_bw = max_bw
self.max_reserved_bw = max_reserved_bw
assert isinstance(unreserved_bw, (list, tuple))
assert len(unreserved_bw) == MAX_CLASS_TYPE
self.unreserved_bw = unreserved_bw
self.admin_group = admin_group
self.remote_as = remote_as
assert ip.valid_ipv4(remote_ip)
self.remote_ip = remote_ip
self.average_delay = average_delay
self.min_delay = min_delay
self.max_delay = max_delay
self.delay_var = delay_var
self.pkt_loss = pkt_loss
self.residual_bw = residual_bw
self.average_bw = average_bw
self.utilized_bw = utilized_bw
@classmethod
def parse(cls, buf):
(lp_status, te_metric, max_bw, max_reserved_bw,
bw_cls_num) = struct.unpack_from(cls._HEADER_FMT, buf)
if MAX_CLASS_TYPE < bw_cls_num:
bw_cls_num = MAX_CLASS_TYPE
offset = cls.HEADER_SIZE
unreserved_bw = []
for _ in range(bw_cls_num):
(u_bw,) = struct.unpack_from(cls._REPEATED_FMT, buf, offset)
unreserved_bw.append(u_bw)
offset += cls.REPEATED_SIZE
(admin_group, remote_as, remote_ip, average_delay, min_delay,
max_delay, delay_var, pkt_loss, residual_bw, average_bw,
utilized_bw) = struct.unpack_from(
cls._FOOTER_FMT, buf, offset)
offset += cls.FOOTER_SIZE
remote_ip = addrconv.ipv4.bin_to_text(remote_ip)
return cls(lp_status, te_metric, max_bw, max_reserved_bw,
unreserved_bw, admin_group, remote_as, remote_ip,
average_delay, min_delay, max_delay, delay_var, pkt_loss,
residual_bw, average_bw, utilized_bw), buf[offset:]
def serialize(self):
buf = struct.pack(
self._HEADER_FMT, self.lp_status, self.te_metric, self.max_bw,
self.max_reserved_bw, len(self.unreserved_bw))
for u_bw in self.unreserved_bw:
buf += struct.pack(self._REPEATED_FMT, u_bw)
remote_ip = addrconv.ipv4.text_to_bin(self.remote_ip)
buf += struct.pack(
self._FOOTER_FMT, self.admin_group, self.remote_as, remote_ip,
self.average_delay, self.min_delay, self.max_delay,
self.delay_var, self.pkt_loss, self.residual_bw, self.average_bw,
self.utilized_bw)
return buf
@six.add_metaclass(abc.ABCMeta)
class _NextHop(type_desc.TypeDisp, stringify.StringifyMixin):
"""
Base class for Zebra Nexthop structure.
"""
# Zebra Nexthop structure:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthop Type |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 address or Interface Index number (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!B'
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
def __init__(self, ifindex=None, ifname=None, addr=None, type_=None):
super(_NextHop, self).__init__()
self.ifindex = ifindex
self.ifname = ifname
self.addr = addr
self.type = type_
@classmethod
@abc.abstractmethod
def parse(cls, buf):
(type_,) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
subcls = cls._lookup_type(type_)
if subcls is None:
raise struct.error('unsupported Nexthop type: %d' % type_)
nexthop, rest = subcls.parse(rest)
nexthop.type = type_
return nexthop, rest
@abc.abstractmethod
def _serialize(self):
return b''
def serialize(self, version=_DEFAULT_VERSION):
if self.type is None:
if version <= 3:
nh_cls = _NextHop
elif version == 4:
nh_cls = _FrrNextHop
else:
raise ValueError(
'Unsupported Zebra protocol version: %d' % version)
self.type = nh_cls._rev_lookup_type(self.__class__)
return struct.pack(self._HEADER_FMT, self.type) + self._serialize()
@six.add_metaclass(abc.ABCMeta)
class _FrrNextHop(_NextHop):
"""
Base class for Zebra Nexthop structure for translating nexthop types
on FRRouting.
"""
_NEXTHOP_COUNT_FMT = '!B' # nexthop_count
_NEXTHOP_COUNT_SIZE = struct.calcsize(_NEXTHOP_COUNT_FMT)
def _parse_nexthops(buf, version=_DEFAULT_VERSION):
(nexthop_count,) = struct.unpack_from(_NEXTHOP_COUNT_FMT, buf)
rest = buf[_NEXTHOP_COUNT_SIZE:]
if version <= 3:
nh_cls = _NextHop
elif version == 4:
nh_cls = _FrrNextHop
else:
raise struct.error(
'Unsupported Zebra protocol version: %d' % version)
nexthops = []
for _ in range(nexthop_count):
nexthop, rest = nh_cls.parse(rest)
nexthops.append(nexthop)
return nexthops, rest
def _serialize_nexthops(nexthops, version=_DEFAULT_VERSION):
nexthop_count = len(nexthops)
buf = struct.pack(_NEXTHOP_COUNT_FMT, nexthop_count)
if nexthop_count == 0:
return buf
for nexthop in nexthops:
buf += nexthop.serialize(version=version)
return buf
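# Illustrative sketch (assumption, not in the original module): for the default
# (Quagga v3) encoding with no ifindex set,
# _serialize_nexthops([NextHopIPv4(addr='10.0.0.1')]) packs a one-byte nexthop
# count, then each nexthop's one-byte type followed by its body (here the 4-byte
# address), and _parse_nexthops() walks the same layout back into objects.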
@_FrrNextHop.register_type(FRR_ZEBRA_NEXTHOP_IFINDEX)
@_NextHop.register_type(ZEBRA_NEXTHOP_IFINDEX)
class NextHopIFIndex(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IFINDEX type.
"""
_BODY_FMT = '!I' # ifindex
BODY_SIZE = struct.calcsize(_BODY_FMT)
@classmethod
def parse(cls, buf):
(ifindex,) = struct.unpack_from(cls._BODY_FMT, buf)
rest = buf[cls.BODY_SIZE:]
return cls(ifindex=ifindex), rest
def _serialize(self):
return struct.pack(self._BODY_FMT, self.ifindex)
@_NextHop.register_type(ZEBRA_NEXTHOP_IFNAME)
class NextHopIFName(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IFNAME type.
"""
_BODY_FMT = '!I' # ifindex
BODY_SIZE = struct.calcsize(_BODY_FMT)
@classmethod
def parse(cls, buf):
(ifindex,) = struct.unpack_from(cls._BODY_FMT, buf)
rest = buf[cls.BODY_SIZE:]
return cls(ifindex=ifindex), rest
def _serialize(self):
return struct.pack(self._BODY_FMT, self.ifindex)
@_FrrNextHop.register_type(FRR_ZEBRA_NEXTHOP_IPV4)
@_NextHop.register_type(ZEBRA_NEXTHOP_IPV4)
class NextHopIPv4(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IPV4 type.
"""
_BODY_FMT = '!4s' # addr(IPv4)
BODY_SIZE = struct.calcsize(_BODY_FMT)
_BODY_FMT_FRR_V3 = '!4sI' # addr(IPv4), ifindex
BODY_SIZE_FRR_V3 = struct.calcsize(_BODY_FMT_FRR_V3)
@classmethod
def parse(cls, buf):
if _is_frr_version_ge(_FRR_VERSION_3_0):
(addr, ifindex) = struct.unpack_from(cls._BODY_FMT_FRR_V3, buf)
addr = addrconv.ipv4.bin_to_text(addr)
rest = buf[cls.BODY_SIZE_FRR_V3:]
return cls(ifindex=ifindex, addr=addr), rest
addr = addrconv.ipv4.bin_to_text(buf[:cls.BODY_SIZE])
rest = buf[cls.BODY_SIZE:]
return cls(addr=addr), rest
def _serialize(self):
if _is_frr_version_ge(_FRR_VERSION_3_0) and self.ifindex:
addr = addrconv.ipv4.text_to_bin(self.addr)
return struct.pack(self._BODY_FMT_FRR_V3, addr, self.ifindex)
return addrconv.ipv4.text_to_bin(self.addr)
@_FrrNextHop.register_type(FRR_ZEBRA_NEXTHOP_IPV4_IFINDEX)
@_NextHop.register_type(ZEBRA_NEXTHOP_IPV4_IFINDEX)
class NextHopIPv4IFIndex(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IPV4_IFINDEX type.
"""
_BODY_FMT = '!4sI' # addr(IPv4), ifindex
BODY_SIZE = struct.calcsize(_BODY_FMT)
@classmethod
def parse(cls, buf):
(addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf)
addr = addrconv.ipv4.bin_to_text(addr)
rest = buf[cls.BODY_SIZE:]
return cls(ifindex=ifindex, addr=addr), rest
def _serialize(self):
addr = addrconv.ipv4.text_to_bin(self.addr)
return struct.pack(self._BODY_FMT, addr, self.ifindex)
@_NextHop.register_type(ZEBRA_NEXTHOP_IPV4_IFNAME)
class NextHopIPv4IFName(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IPV4_IFNAME type.
"""
_BODY_FMT = '!4sI' # addr(IPv4), ifindex
BODY_SIZE = struct.calcsize(_BODY_FMT)
@classmethod
def parse(cls, buf):
(addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf)
addr = addrconv.ipv4.bin_to_text(addr)
rest = buf[cls.BODY_SIZE:]
return cls(ifindex=ifindex, addr=addr), rest
def _serialize(self):
addr = addrconv.ipv4.text_to_bin(self.addr)
return struct.pack(self._BODY_FMT, addr, self.ifindex)
@_FrrNextHop.register_type(FRR_ZEBRA_NEXTHOP_IPV6)
@_NextHop.register_type(ZEBRA_NEXTHOP_IPV6)
class NextHopIPv6(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IPV6 type.
"""
_BODY_FMT = '!16s' # addr(IPv6)
BODY_SIZE = struct.calcsize(_BODY_FMT)
_BODY_FMT_FRR_V3 = '!16sI' # addr(IPv6), ifindex
BODY_SIZE_FRR_V3 = struct.calcsize(_BODY_FMT_FRR_V3)
@classmethod
def parse(cls, buf):
if _is_frr_version_ge(_FRR_VERSION_3_0):
(addr, ifindex) = struct.unpack_from(cls._BODY_FMT_FRR_V3, buf)
            addr = addrconv.ipv6.bin_to_text(addr)
rest = buf[cls.BODY_SIZE_FRR_V3:]
return cls(ifindex=ifindex, addr=addr), rest
addr = addrconv.ipv6.bin_to_text(buf[:cls.BODY_SIZE])
rest = buf[cls.BODY_SIZE:]
return cls(addr=addr), rest
def _serialize(self):
if _is_frr_version_ge(_FRR_VERSION_3_0) and self.ifindex:
            addr = addrconv.ipv6.text_to_bin(self.addr)
return struct.pack(self._BODY_FMT_FRR_V3, addr, self.ifindex)
return addrconv.ipv6.text_to_bin(self.addr)
@_FrrNextHop.register_type(FRR_ZEBRA_NEXTHOP_IPV6_IFINDEX)
@_NextHop.register_type(ZEBRA_NEXTHOP_IPV6_IFINDEX)
class NextHopIPv6IFIndex(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IPV6_IFINDEX type.
"""
_BODY_FMT = '!16sI' # addr(IPv6), ifindex
BODY_SIZE = struct.calcsize(_BODY_FMT)
@classmethod
def parse(cls, buf):
(addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf)
addr = addrconv.ipv6.bin_to_text(addr)
rest = buf[cls.BODY_SIZE:]
return cls(ifindex=ifindex, addr=addr), rest
def _serialize(self):
addr = addrconv.ipv6.text_to_bin(self.addr)
return struct.pack(self._BODY_FMT, addr, self.ifindex)
@_NextHop.register_type(ZEBRA_NEXTHOP_IPV6_IFNAME)
class NextHopIPv6IFName(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_IPV6_IFNAME type.
"""
_BODY_FMT = '!16sI' # addr(IPv6), ifindex
BODY_SIZE = struct.calcsize(_BODY_FMT)
@classmethod
def parse(cls, buf):
(addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf)
addr = addrconv.ipv6.bin_to_text(addr)
rest = buf[cls.BODY_SIZE:]
return cls(ifindex=ifindex, addr=addr), rest
def _serialize(self):
addr = addrconv.ipv6.text_to_bin(self.addr)
return struct.pack(self._BODY_FMT, addr, self.ifindex)
@_FrrNextHop.register_type(FRR_ZEBRA_NEXTHOP_BLACKHOLE)
@_NextHop.register_type(ZEBRA_NEXTHOP_BLACKHOLE)
class NextHopBlackhole(_NextHop):
"""
Nexthop class for ZEBRA_NEXTHOP_BLACKHOLE type.
"""
@classmethod
def parse(cls, buf):
return cls(), buf
def _serialize(self):
return b''
class RegisteredNexthop(stringify.StringifyMixin):
"""
Unit of ZEBRA_NEXTHOP_REGISTER message body.
"""
# Unit of Zebra Nexthop Register message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Connected | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!?H'
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
# Note: connected is renamed to flags on FRRouting.
def __init__(self, connected, family, prefix):
super(RegisteredNexthop, self).__init__()
self.connected = connected
self.family = family
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
self.prefix = prefix
@property
def flags(self):
return self.connected
@flags.setter
def flags(self, v):
self.connected = v
@classmethod
def parse(cls, buf):
(connected, family) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
prefix, rest = _parse_ip_prefix(family, rest)
return cls(connected, family, prefix), rest
def serialize(self):
buf = struct.pack(self._HEADER_FMT, self.connected, self.family)
return buf + _serialize_ip_prefix(self.prefix)
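# Illustrative sketch (assumption, not in the original module): a registered
# nexthop for '10.0.0.0/8' with connected=True and family=socket.AF_INET
# serializes to struct.pack('!?H', True, socket.AF_INET) followed by the prefix
# encoding from _serialize_ip_prefix(), as laid out in the diagram above.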
# Zebra message class
class ZebraMessage(packet_base.PacketBase):
"""
Zebra protocol parser/serializer class.
    An instance has at least the following attributes.
    Most of them are the same as their on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== ==========================================================
Attribute Description
============== ==========================================================
length Total packet length including this header.
The minimum length is 3 bytes for version 0 messages,
6 bytes for version 1/2 messages and 8 bytes for version
3 messages.
version Version number of the Zebra protocol message.
To instantiate messages with other than the default
version, ``version`` must be specified.
vrf_id VRF ID for the route contained in message.
Not present in version 0/1/2 messages in the on-wire
                   structure, and always 0 for these versions.
command Zebra Protocol command, which denotes message type.
body Messages body.
An instance of subclass of ``_ZebraMessageBody`` named
like "Zebra + <message name>" (e.g., ``ZebraHello``).
Or ``None`` if message does not contain any body.
============== ==========================================================
.. Note::
To instantiate Zebra messages, ``command`` can be omitted when the
valid ``body`` is specified.
::
>>> from ryu.lib.packet import zebra
>>> zebra.ZebraMessage(body=zebra.ZebraHello())
ZebraMessage(body=ZebraHello(route_type=14),command=23,
length=None,version=3,vrf_id=0)
On the other hand, if ``body`` is omitted, ``command`` must be
specified.
::
>>> zebra.ZebraMessage(command=zebra.ZEBRA_INTERFACE_ADD)
ZebraMessage(body=None,command=1,length=None,version=3,vrf_id=0)
"""
# Zebra Protocol Common Header (version 0):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Length | Command |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_V0_HEADER_FMT = '!HB'
V0_HEADER_SIZE = struct.calcsize(_V0_HEADER_FMT)
_MIN_LEN = V0_HEADER_SIZE
# Zebra Protocol Common Header (version 1):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Length | Marker | Version |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Command |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_V1_HEADER_FMT = '!HBBH'
V1_HEADER_SIZE = struct.calcsize(_V1_HEADER_FMT)
# Zebra Protocol Common Header (version 3):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Length | Marker | Version |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | VRF ID | Command |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_V3_HEADER_FMT = '!HBBHH'
V3_HEADER_SIZE = struct.calcsize(_V3_HEADER_FMT)
# Note: Marker should be 0xff(=255) in the version>=1 header.
    # Also, FRRouting uses a different marker value.
_MARKER = 0xff
_LT_MARKER = 0xfe
def __init__(self, length=None, version=_DEFAULT_VERSION,
vrf_id=0, command=None, body=None):
super(ZebraMessage, self).__init__()
self.length = length
self.version = version
self.vrf_id = vrf_id
self.command = command
self.body = body
def _fill_command(self):
assert isinstance(self.body, _ZebraMessageBody)
body_base_cls = _ZebraMessageBody
if self.version == 4:
body_base_cls = _FrrZebraMessageBody
self.command = body_base_cls.rev_lookup_command(self.body.__class__)
@classmethod
def get_header_size(cls, version):
if version == 0:
return cls.V0_HEADER_SIZE
elif version in [1, 2]:
return cls.V1_HEADER_SIZE
elif version in [3, 4]:
return cls.V3_HEADER_SIZE
else:
raise ValueError(
'Unsupported Zebra protocol version: %d'
% version)
@classmethod
def parse_header(cls, buf):
(length, marker) = struct.unpack_from(cls._V0_HEADER_FMT, buf)
if marker not in [cls._MARKER, cls._LT_MARKER]:
command = marker
body_buf = buf[cls.V0_HEADER_SIZE:length]
# version=0, vrf_id=0
return length, 0, 0, command, body_buf
(length, marker, version, command) = struct.unpack_from(
cls._V1_HEADER_FMT, buf)
if version in [1, 2]:
body_buf = buf[cls.V1_HEADER_SIZE:length]
# vrf_id=0
return length, version, 0, command, body_buf
(length, marker, version, vrf_id, command) = struct.unpack_from(
cls._V3_HEADER_FMT, buf)
if version == 3 or (version == 4 and marker == cls._LT_MARKER):
body_buf = buf[cls.V3_HEADER_SIZE:length]
return length, version, vrf_id, command, body_buf
raise struct.error(
'Failed to parse Zebra protocol header: '
'marker=%d, version=%d' % (marker, version))
@classmethod
def get_body_class(cls, version, command):
if version == 4:
return _FrrZebraMessageBody.lookup_command(command)
else:
return _ZebraMessageBody.lookup_command(command)
@classmethod
def _parser_impl(cls, buf, from_zebra=False):
buf = six.binary_type(buf)
(length, version, vrf_id, command,
body_buf) = cls.parse_header(buf)
if body_buf:
body_cls = cls.get_body_class(version, command)
if from_zebra:
body = body_cls.parse_from_zebra(body_buf, version=version)
else:
body = body_cls.parse(body_buf, version=version)
else:
body = None
rest = buf[length:]
if from_zebra:
return (cls(length, version, vrf_id, command, body),
_ZebraMessageFromZebra, rest)
return cls(length, version, vrf_id, command, body), cls, rest
@classmethod
def parser(cls, buf):
return cls._parser_impl(buf)
def serialize_header(self, body_len):
if self.version == 0:
self.length = self.V0_HEADER_SIZE + body_len # fixup
return struct.pack(
self._V0_HEADER_FMT,
self.length, self.command)
elif self.version in [1, 2]:
self.length = self.V1_HEADER_SIZE + body_len # fixup
return struct.pack(
self._V1_HEADER_FMT,
self.length, self._MARKER, self.version,
self.command)
elif self.version in [3, 4]:
if self.version == 3:
_marker = self._MARKER
else: # self.version == 4
_marker = self._LT_MARKER
self.length = self.V3_HEADER_SIZE + body_len # fixup
return struct.pack(
self._V3_HEADER_FMT,
self.length, _marker, self.version,
self.vrf_id, self.command)
else:
raise ValueError(
'Unsupported Zebra protocol version: %d'
% self.version)
def serialize(self, _payload=None, _prev=None):
if self.body is None:
assert self.command is not None
body = b''
else:
assert isinstance(self.body, _ZebraMessageBody)
self._fill_command() # fixup
body = self.body.serialize(version=self.version)
return self.serialize_header(len(body)) + body
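# Illustrative sketch (assumption, not in the original module): serializing
# ZebraMessage(body=ZebraHello()) with the default version 3 first renders the
# body, then serialize_header() packs the 8-byte header (length, the 0xff
# marker, version, vrf_id, and the command looked up from the body class), with
# length fixed up to header size plus body size.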
class _ZebraMessageFromZebra(ZebraMessage):
"""
    This class corresponds to the messages sent from the Zebra daemon.
"""
@classmethod
def parser(cls, buf):
return ZebraMessage._parser_impl(buf, from_zebra=True)
# Alias
zebra = ZebraMessage
# Zebra message body classes
class _ZebraMessageBody(type_desc.TypeDisp, stringify.StringifyMixin):
"""
Base class for Zebra message body.
"""
@classmethod
def lookup_command(cls, command):
return cls._lookup_type(command)
@classmethod
def rev_lookup_command(cls, body_cls):
return cls._rev_lookup_type(body_cls)
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
return cls()
@classmethod
def parse_from_zebra(cls, buf, version=_DEFAULT_VERSION):
return cls.parse(buf, version=version)
def serialize(self, version=_DEFAULT_VERSION):
return b''
class _FrrZebraMessageBody(_ZebraMessageBody):
"""
Pseudo message body class for translating message types on FRRouting.
"""
@_FrrZebraMessageBody.register_unknown_type()
@_ZebraMessageBody.register_unknown_type()
class ZebraUnknownMessage(_ZebraMessageBody):
"""
Message body class for Unknown command.
"""
def __init__(self, buf):
super(ZebraUnknownMessage, self).__init__()
self.buf = buf
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
return cls(buf)
def serialize(self, version=_DEFAULT_VERSION):
return self.buf
@six.add_metaclass(abc.ABCMeta)
class _ZebraInterface(_ZebraMessageBody):
"""
Base class for ZEBRA_INTERFACE_ADD, ZEBRA_INTERFACE_DELETE,
ZEBRA_INTERFACE_UP and ZEBRA_INTERFACE_DOWN message body.
"""
# Zebra Interface Add/Delete message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface Name (INTERFACE_NAMSIZE bytes length) |
# | |
# | |
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface index |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Status |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface flags |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (PTM Enable) | (PTM Status) | v4(FRRouting)
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Speed): v4(FRRouting v3.0 or later) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface's MTU for IPv4 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface's MTU for IPv6 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Bandwidth |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Link Layer Type): v3 or later |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Hardware Address Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Hardware Address if HW length different from 0 |
# | ... max is INTERFACE_HWADDR_MAX |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | link_params? | Whether a link-params follows: 1 or 0.
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Link params 0 or 1 INTERFACE_LINK_PARAMS_SIZE sized |
# | .... (struct if_link_params). |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ifname, ifindex, status, if_flags, metric, ifmtu, ifmtu6, bandwidth,
# hw_addr_len
_HEADER_FMT = '!%dsIBQIIIII' % INTERFACE_NAMSIZE
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
# ifname, ifindex, status, if_flags, metric, ifmtu, ifmtu6, bandwidth,
# ll_type, hw_addr_len
_V3_HEADER_FMT = '!%dsIBQIIIIII' % INTERFACE_NAMSIZE
V3_HEADER_SIZE = struct.calcsize(_V3_HEADER_FMT)
# ifname, ifindex, status, if_flags, ptm_enable, ptm_status, metric,
# ifmtu, ifmtu6, bandwidth, ll_type, hw_addr_len
_V4_HEADER_FMT_2_0 = '!%dsIBQBBIIIIII' % INTERFACE_NAMSIZE
V4_HEADER_SIZE_2_0 = struct.calcsize(_V4_HEADER_FMT_2_0)
# ifname, ifindex, status, if_flags, ptm_enable, ptm_status, metric,
# speed, ifmtu, ifmtu6, bandwidth, ll_type, hw_addr_len
_V4_HEADER_FMT_3_0 = '!%dsIBQBBIIIIIII' % INTERFACE_NAMSIZE
V4_HEADER_SIZE_3_0 = struct.calcsize(_V4_HEADER_FMT_3_0)
# link_params_state (whether a link-params follows)
_LP_STATE_FMT = '!?'
LP_STATE_SIZE = struct.calcsize(_LP_STATE_FMT)
# See InterfaceLinkParams class for Link params structure
def __init__(self, ifname=None, ifindex=None, status=None, if_flags=None,
ptm_enable=None, ptm_status=None,
metric=None, speed=None, ifmtu=None, ifmtu6=None,
bandwidth=None, ll_type=None, hw_addr_len=0, hw_addr=None,
link_params=None):
super(_ZebraInterface, self).__init__()
self.ifname = ifname
self.ifindex = ifindex
self.status = status
self.if_flags = if_flags
self.ptm_enable = ptm_enable
self.ptm_status = ptm_status
self.metric = metric
self.speed = speed
self.ifmtu = ifmtu
self.ifmtu6 = ifmtu6
self.bandwidth = bandwidth
self.ll_type = ll_type
        self.hw_addr_length = hw_addr_len
hw_addr = hw_addr or b''
self.hw_addr = hw_addr
assert (isinstance(link_params, InterfaceLinkParams)
or link_params is None)
self.link_params = link_params
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
ptm_enable = None
ptm_status = None
speed = None
ll_type = None
if version <= 2:
(ifname, ifindex, status, if_flags, metric,
ifmtu, ifmtu6, bandwidth,
hw_addr_len) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
elif version == 3:
(ifname, ifindex, status, if_flags, metric,
ifmtu, ifmtu6, bandwidth, ll_type,
hw_addr_len) = struct.unpack_from(cls._V3_HEADER_FMT, buf)
rest = buf[cls.V3_HEADER_SIZE:]
elif version == 4:
if _is_frr_version_ge(_FRR_VERSION_3_0):
(ifname, ifindex, status, if_flags, ptm_enable, ptm_status,
metric, speed, ifmtu, ifmtu6, bandwidth, ll_type,
hw_addr_len) = struct.unpack_from(cls._V4_HEADER_FMT_3_0, buf)
rest = buf[cls.V4_HEADER_SIZE_3_0:]
elif _is_frr_version_ge(_FRR_VERSION_2_0):
(ifname, ifindex, status, if_flags, ptm_enable, ptm_status,
metric, ifmtu, ifmtu6, bandwidth, ll_type,
hw_addr_len) = struct.unpack_from(cls._V4_HEADER_FMT_2_0, buf)
rest = buf[cls.V4_HEADER_SIZE_2_0:]
else:
raise struct.error(
'Unsupported FRRouting version: %s'
% CONF['zapi'].frr_version)
else:
raise struct.error(
'Unsupported Zebra protocol version: %d'
% version)
ifname = str(six.text_type(ifname.strip(b'\x00'), 'ascii'))
hw_addr_len = min(hw_addr_len, INTERFACE_HWADDR_MAX)
hw_addr_bin = rest[:hw_addr_len]
rest = rest[hw_addr_len:]
if 0 < hw_addr_len < 7:
# Assuming MAC address
hw_addr = addrconv.mac.bin_to_text(
hw_addr_bin + b'\x00' * (6 - hw_addr_len))
else:
# Unknown hardware address
hw_addr = hw_addr_bin
if not rest:
return cls(ifname, ifindex, status, if_flags,
ptm_enable, ptm_status, metric, speed, ifmtu, ifmtu6,
bandwidth, ll_type, hw_addr_len, hw_addr)
(link_param_state,) = struct.unpack_from(cls._LP_STATE_FMT, rest)
rest = rest[cls.LP_STATE_SIZE:]
if link_param_state:
link_params, rest = InterfaceLinkParams.parse(rest)
else:
link_params = None
return cls(ifname, ifindex, status, if_flags,
ptm_enable, ptm_status, metric, speed, ifmtu, ifmtu6,
bandwidth, ll_type, hw_addr_len, hw_addr,
link_params)
def serialize(self, version=_DEFAULT_VERSION):
if self.ifname is None:
# Case for sending message to Zebra
return b''
# fixup
if netaddr.valid_mac(self.hw_addr):
# MAC address
hw_addr_len = 6
hw_addr = addrconv.mac.text_to_bin(self.hw_addr)
else:
# Unknown hardware address
hw_addr_len = len(self.hw_addr)
hw_addr = self.hw_addr
if version <= 2:
return struct.pack(
self._HEADER_FMT,
self.ifname.encode('ascii'), self.ifindex, self.status,
self.if_flags, self.metric, self.ifmtu, self.ifmtu6,
self.bandwidth, hw_addr_len) + hw_addr
elif version == 3:
buf = struct.pack(
self._V3_HEADER_FMT,
self.ifname.encode('ascii'), self.ifindex, self.status,
self.if_flags, self.metric, self.ifmtu, self.ifmtu6,
self.bandwidth, self.ll_type, hw_addr_len) + hw_addr
elif version == 4:
if _is_frr_version_ge(_FRR_VERSION_3_0):
buf = struct.pack(
self._V4_HEADER_FMT_3_0,
self.ifname.encode('ascii'), self.ifindex, self.status,
self.if_flags, self.ptm_enable, self.ptm_status,
self.metric, self.speed, self.ifmtu, self.ifmtu6,
self.bandwidth, self.ll_type, hw_addr_len) + hw_addr
elif _is_frr_version_ge(_FRR_VERSION_2_0):
buf = struct.pack(
self._V4_HEADER_FMT_2_0,
self.ifname.encode('ascii'), self.ifindex, self.status,
self.if_flags, self.ptm_enable, self.ptm_status,
self.metric, self.ifmtu, self.ifmtu6,
self.bandwidth, self.ll_type, hw_addr_len) + hw_addr
else:
raise ValueError(
'Unsupported FRRouting version: %s'
% CONF['zapi'].frr_version)
else:
raise ValueError(
'Unsupported Zebra protocol version: %d'
% version)
if isinstance(self.link_params, InterfaceLinkParams):
buf += struct.pack(self._LP_STATE_FMT, True)
buf += self.link_params.serialize()
else:
buf += struct.pack(self._LP_STATE_FMT, False)
return buf
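# Example (illustrative sketch, kept as a comment): a version 2
# ZEBRA_INTERFACE_ADD body.  For protocol versions <= 2 serialize() packs
# _HEADER_FMT, so every packed field must be set; the MAC address string is
# converted with addrconv and hw_addr_len is fixed up to 6 automatically.
#
#   body = ZebraInterfaceAdd(
#       ifname='eth0', ifindex=1, status=0, if_flags=0,
#       metric=1, ifmtu=1500, ifmtu6=1500, bandwidth=0,
#       hw_addr='aa:bb:cc:dd:ee:ff')
#   buf = body.serialize(version=2)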
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_ADD)
@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_ADD)
class ZebraInterfaceAdd(_ZebraInterface):
"""
Message body class for ZEBRA_INTERFACE_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_DELETE)
@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_DELETE)
class ZebraInterfaceDelete(_ZebraInterface):
"""
Message body class for ZEBRA_INTERFACE_DELETE.
"""
@six.add_metaclass(abc.ABCMeta)
class _ZebraInterfaceAddress(_ZebraMessageBody):
"""
Base class for ZEBRA_INTERFACE_ADDRESS_ADD and
ZEBRA_INTERFACE_ADDRESS_DELETE message body.
"""
# Zebra Interface Address Add/Delete message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface index |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IFC Flags | flags for connected address
# +-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Prefix len |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Destination Address (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!IB' # ifindex, ifc_flags
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
def __init__(self, ifindex, ifc_flags, family, prefix, dest):
super(_ZebraInterfaceAddress, self).__init__()
self.ifindex = ifindex
self.ifc_flags = ifc_flags
self.family = family
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
self.prefix = prefix
assert ip.valid_ipv4(dest) or ip.valid_ipv6(dest)
self.dest = dest
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(ifindex, ifc_flags) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
(family, prefix, rest) = _parse_zebra_family_prefix(rest)
if socket.AF_INET == family:
dest = addrconv.ipv4.bin_to_text(rest)
elif socket.AF_INET6 == family:
dest = addrconv.ipv6.bin_to_text(rest)
else:
raise struct.error('Unsupported family: %d' % family)
return cls(ifindex, ifc_flags, family, prefix, dest)
def serialize(self, version=_DEFAULT_VERSION):
(self.family, # fixup
body_bin) = _serialize_zebra_family_prefix(self.prefix)
if ip.valid_ipv4(self.dest):
body_bin += addrconv.ipv4.text_to_bin(self.dest)
        elif ip.valid_ipv6(self.dest):
body_bin += addrconv.ipv6.text_to_bin(self.dest)
else:
raise ValueError(
'Invalid destination address: %s' % self.dest)
return struct.pack(self._HEADER_FMT,
self.ifindex, self.ifc_flags) + body_bin
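# Example (illustrative sketch, kept as a comment): an interface address body.
# The family field is recalculated from the prefix during serialization, so
# the value passed to the constructor is only a placeholder.
#
#   body = ZebraInterfaceAddressAdd(
#       ifindex=1, ifc_flags=0, family=socket.AF_INET,
#       prefix='192.0.2.1/24', dest='192.0.2.255')
#   buf = body.serialize()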
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_ADDRESS_ADD)
@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_ADDRESS_ADD)
class ZebraInterfaceAddressAdd(_ZebraInterfaceAddress):
"""
Message body class for ZEBRA_INTERFACE_ADDRESS_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_ADDRESS_DELETE)
@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_ADDRESS_DELETE)
class ZebraInterfaceAddressDelete(_ZebraInterfaceAddress):
"""
Message body class for ZEBRA_INTERFACE_ADDRESS_DELETE.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_UP)
@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_UP)
class ZebraInterfaceUp(_ZebraInterface):
"""
Message body class for ZEBRA_INTERFACE_UP.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_DOWN)
@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_DOWN)
class ZebraInterfaceDown(_ZebraInterface):
"""
Message body class for ZEBRA_INTERFACE_DOWN.
"""
@six.add_metaclass(abc.ABCMeta)
class _ZebraIPRoute(_ZebraMessageBody):
"""
Base class for ZEBRA_IPV4_ROUTE_* and ZEBRA_IPV6_ROUTE_*
message body.
.. Note::
        The Zebra IPv4/IPv6 Route messages have an asymmetric structure.
        If the message was sent from the Zebra daemon, set 'from_zebra=True'
        when creating an instance of this class.
"""
# Zebra IPv4/IPv6 Route message body (Protocol Daemons -> Zebra Daemon):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Route Type | Flags | Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SAFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthop Num |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthops (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Distance) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Metric) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (MTU) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (TAG) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Zebra IPv4/IPv6 Route message body on FRRouting
# (Protocol Daemons -> Zebra Daemon):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Route Type | Instance |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Message | SAFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (IPv4/v6 Source Prefix): v4(FRRouting v3.0 or later) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthop Num |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthops (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Distance) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Metric) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (TAG) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (MTU) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Zebra IPv4/IPv6 Route message body (Zebra Daemon -> Protocol Daemons):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Route Type | Flags | Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Nexthop Num) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Nexthops (Variable)) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (IFIndex Num) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Interface indexes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Distance) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Metric) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (MTU) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (TAG) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Zebra IPv4/IPv6 Route message body on FRRouting
# (Zebra Daemon -> Protocol Daemons):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Route Type | Instance |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (IPv4/v6 Source Prefix): v4(FRRouting v3.0 or later) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Nexthop Num) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Nexthops (Variable)) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (IFIndex Num) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Interface indexes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Distance) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Metric) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (TAG) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!BBB' # type, flags, message
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_V4_HEADER_FMT = '!BHIB' # type, instance, flags, message
V4_HEADER_SIZE = struct.calcsize(_V4_HEADER_FMT)
_SAFI_FMT = '!H' # safi
SAFI_SIZE = struct.calcsize(_SAFI_FMT)
_NUM_FMT = '!B' # nexthop_num or ifindex_num
NUM_SIZE = struct.calcsize(_NUM_FMT)
_IFINDEX_FMT = '!I' # ifindex
IFINDEX_SIZE = struct.calcsize(_IFINDEX_FMT)
# API type specific constants
_FAMILY = None # either socket.AF_INET or socket.AF_INET6
def __init__(self, route_type, flags, message, safi=None,
prefix=None, src_prefix=None,
nexthops=None, ifindexes=None,
distance=None, metric=None, mtu=None, tag=None,
instance=None, from_zebra=False):
super(_ZebraIPRoute, self).__init__()
self.route_type = route_type
self.instance = instance
self.flags = flags
self.message = message
        # SAFI should be included only if this message is sent to Zebra.
if from_zebra:
self.safi = None
else:
self.safi = safi or packet_safi.UNICAST
assert prefix is not None
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
self.prefix = prefix
if isinstance(src_prefix, (IPv4Prefix, IPv6Prefix)):
src_prefix = src_prefix.prefix
self.src_prefix = src_prefix
        # Nexthops should be a list of string representations of IP addresses
        # if this message was sent from Zebra, otherwise a list of _NextHop
        # subclass instances.
nexthops = nexthops or []
if from_zebra:
for nexthop in nexthops:
assert ip.valid_ipv4(nexthop) or ip.valid_ipv6(nexthop)
else:
for nexthop in nexthops:
assert isinstance(nexthop, _NextHop)
self.nexthops = nexthops
        # Interface indexes should be included only if this message was sent
        # from Zebra.
if from_zebra:
ifindexes = ifindexes or []
for ifindex in ifindexes:
assert isinstance(ifindex, six.integer_types)
self.ifindexes = ifindexes
else:
self.ifindexes = None
self.distance = distance
self.metric = metric
self.mtu = mtu
self.tag = tag
        # Whether this message was sent from the Zebra daemon or not.
self.from_zebra = from_zebra
@classmethod
def _parse_message_option(cls, message, flag, fmt, buf):
if message & flag:
(option,) = struct.unpack_from(fmt, buf)
return option, buf[struct.calcsize(fmt):]
return None, buf
@classmethod
def _parse_impl(cls, buf, version=_DEFAULT_VERSION, from_zebra=False):
instance = None
if version <= 3:
(route_type, flags, message,) = struct.unpack_from(
cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
elif version == 4:
(route_type, instance, flags, message,) = struct.unpack_from(
cls._V4_HEADER_FMT, buf)
rest = buf[cls.V4_HEADER_SIZE:]
else:
raise struct.error(
'Unsupported Zebra protocol version: %d'
% version)
if from_zebra:
safi = None
else:
(safi,) = struct.unpack_from(cls._SAFI_FMT, rest)
rest = rest[cls.SAFI_SIZE:]
prefix, rest = _parse_ip_prefix(cls._FAMILY, rest)
src_prefix = None
if version == 4 and message & FRR_ZAPI_MESSAGE_SRCPFX:
src_prefix, rest = _parse_ip_prefix(cls._FAMILY, rest)
if from_zebra and message & ZAPI_MESSAGE_NEXTHOP:
nexthops = []
(nexthop_num,) = struct.unpack_from(cls._NUM_FMT, rest)
rest = rest[cls.NUM_SIZE:]
if cls._FAMILY == socket.AF_INET:
for _ in range(nexthop_num):
nexthop = addrconv.ipv4.bin_to_text(rest[:4])
nexthops.append(nexthop)
rest = rest[4:]
else: # cls._FAMILY == socket.AF_INET6:
for _ in range(nexthop_num):
nexthop = addrconv.ipv6.bin_to_text(rest[:16])
nexthops.append(nexthop)
rest = rest[16:]
else:
nexthops, rest = _parse_nexthops(rest, version)
ifindexes = []
if from_zebra and message & ZAPI_MESSAGE_IFINDEX:
(ifindex_num,) = struct.unpack_from(cls._NUM_FMT, rest)
rest = rest[cls.NUM_SIZE:]
for _ in range(ifindex_num):
(ifindex,) = struct.unpack_from(cls._IFINDEX_FMT, rest)
ifindexes.append(ifindex)
rest = rest[cls.IFINDEX_SIZE:]
if version <= 3:
distance, rest = cls._parse_message_option(
message, ZAPI_MESSAGE_DISTANCE, '!B', rest)
metric, rest = cls._parse_message_option(
message, ZAPI_MESSAGE_METRIC, '!I', rest)
mtu, rest = cls._parse_message_option(
message, ZAPI_MESSAGE_MTU, '!I', rest)
tag, rest = cls._parse_message_option(
message, ZAPI_MESSAGE_TAG, '!I', rest)
elif version == 4:
distance, rest = cls._parse_message_option(
message, FRR_ZAPI_MESSAGE_DISTANCE, '!B', rest)
metric, rest = cls._parse_message_option(
message, FRR_ZAPI_MESSAGE_METRIC, '!I', rest)
tag, rest = cls._parse_message_option(
message, FRR_ZAPI_MESSAGE_TAG, '!I', rest)
mtu, rest = cls._parse_message_option(
message, FRR_ZAPI_MESSAGE_MTU, '!I', rest)
else:
raise struct.error(
'Unsupported Zebra protocol version: %d'
% version)
return cls(route_type, flags, message, safi, prefix, src_prefix,
nexthops, ifindexes,
distance, metric, mtu, tag,
instance, from_zebra=from_zebra)
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
return cls._parse_impl(buf, version=version)
@classmethod
def parse_from_zebra(cls, buf, version=_DEFAULT_VERSION):
return cls._parse_impl(buf, version=version, from_zebra=True)
def _serialize_message_option(self, option, flag, fmt):
if option is None:
return b''
# fixup
self.message |= flag
return struct.pack(fmt, option)
def serialize(self, version=_DEFAULT_VERSION):
prefix = _serialize_ip_prefix(self.prefix)
if version == 4 and self.src_prefix:
self.message |= FRR_ZAPI_MESSAGE_SRCPFX # fixup
prefix += _serialize_ip_prefix(self.src_prefix)
nexthops = b''
if self.from_zebra and self.nexthops:
self.message |= ZAPI_MESSAGE_NEXTHOP # fixup
nexthops += struct.pack(self._NUM_FMT, len(self.nexthops))
for nexthop in self.nexthops:
nexthops += ip.text_to_bin(nexthop)
else:
self.message |= ZAPI_MESSAGE_NEXTHOP # fixup
nexthops = _serialize_nexthops(self.nexthops, version=version)
ifindexes = b''
if self.ifindexes and self.from_zebra:
self.message |= ZAPI_MESSAGE_IFINDEX # fixup
ifindexes += struct.pack(self._NUM_FMT, len(self.ifindexes))
for ifindex in self.ifindexes:
ifindexes += struct.pack(self._IFINDEX_FMT, ifindex)
if version <= 3:
options = self._serialize_message_option(
self.distance, ZAPI_MESSAGE_DISTANCE, '!B')
options += self._serialize_message_option(
self.metric, ZAPI_MESSAGE_METRIC, '!I')
options += self._serialize_message_option(
self.mtu, ZAPI_MESSAGE_MTU, '!I')
options += self._serialize_message_option(
self.tag, ZAPI_MESSAGE_TAG, '!I')
header = struct.pack(
self._HEADER_FMT,
self.route_type, self.flags, self.message)
elif version == 4:
options = self._serialize_message_option(
self.distance, FRR_ZAPI_MESSAGE_DISTANCE, '!B')
options += self._serialize_message_option(
self.metric, FRR_ZAPI_MESSAGE_METRIC, '!I')
options += self._serialize_message_option(
self.tag, FRR_ZAPI_MESSAGE_TAG, '!I')
options += self._serialize_message_option(
self.mtu, FRR_ZAPI_MESSAGE_MTU, '!I')
header = struct.pack(
self._V4_HEADER_FMT,
self.route_type, self.instance, self.flags, self.message)
else:
raise ValueError(
'Unsupported Zebra protocol version: %d'
% version)
if not self.from_zebra:
header += struct.pack(self._SAFI_FMT, self.safi)
return header + prefix + nexthops + ifindexes + options
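# Example (illustrative sketch, kept as a comment): advertising an IPv4 route
# towards Zebra.  The ZAPI_MESSAGE_* bits in 'message' do not have to be set
# by hand; serialize() and _serialize_message_option() above OR them in for
# every option that is present.  The ZEBRA_ROUTE_* constant is assumed to be
# defined earlier in this module.
#
#   route = ZebraIPv4RouteAdd(
#       route_type=ZEBRA_ROUTE_BGP, flags=0, message=0,
#       prefix='10.0.0.0/24', distance=20, metric=100)
#   buf = route.serialize(version=3)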
class _ZebraIPv4Route(_ZebraIPRoute):
"""
Base class for ZEBRA_IPV4_ROUTE_* message body.
"""
_FAMILY = socket.AF_INET
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV4_ROUTE_ADD)
@_ZebraMessageBody.register_type(ZEBRA_IPV4_ROUTE_ADD)
class ZebraIPv4RouteAdd(_ZebraIPv4Route):
"""
Message body class for ZEBRA_IPV4_ROUTE_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV4_ROUTE_DELETE)
@_ZebraMessageBody.register_type(ZEBRA_IPV4_ROUTE_DELETE)
class ZebraIPv4RouteDelete(_ZebraIPv4Route):
"""
Message body class for ZEBRA_IPV4_ROUTE_DELETE.
"""
class _ZebraIPv6Route(_ZebraIPRoute):
"""
Base class for ZEBRA_IPV6_ROUTE_* message body.
"""
_FAMILY = socket.AF_INET6
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV6_ROUTE_ADD)
@_ZebraMessageBody.register_type(ZEBRA_IPV6_ROUTE_ADD)
class ZebraIPv6RouteAdd(_ZebraIPv6Route):
"""
Message body class for ZEBRA_IPV6_ROUTE_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV6_ROUTE_DELETE)
@_ZebraMessageBody.register_type(ZEBRA_IPV6_ROUTE_DELETE)
class ZebraIPv6RouteDelete(_ZebraIPv6Route):
"""
Message body class for ZEBRA_IPV6_ROUTE_DELETE.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV4_ROUTE_IPV6_NEXTHOP_ADD)
class ZebraIPv4RouteIPv6NexthopAdd(_ZebraIPv4Route):
"""
Message body class for FRR_ZEBRA_IPV4_ROUTE_IPV6_NEXTHOP_ADD.
"""
@six.add_metaclass(abc.ABCMeta)
class _ZebraRedistribute(_ZebraMessageBody):
"""
Base class for ZEBRA_REDISTRIBUTE_ADD and ZEBRA_REDISTRIBUTE_DELETE
message body.
"""
# Zebra Redistribute message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Route Type |
# +-+-+-+-+-+-+-+-+
#
# Zebra Redistribute message body on FRRouting:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI | Route Type | Instance |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++-+-+-+-+-+-+-+-++-+-+-+-+-+-+
_HEADER_FMT = '!B' # route_type
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_V4_HEADER_FMT = '!BBH' # afi, route_type, instance
V4_HEADER_SIZE = struct.calcsize(_V4_HEADER_FMT)
def __init__(self, route_type, afi=None, instance=None):
super(_ZebraRedistribute, self).__init__()
self.afi = afi
self.route_type = route_type
self.instance = instance
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
afi = None
instance = None
if version <= 3:
(route_type,) = struct.unpack_from(cls._HEADER_FMT, buf)
elif version == 4:
(afi, route_type,
instance) = struct.unpack_from(cls._V4_HEADER_FMT, buf)
else:
raise struct.error(
'Unsupported Zebra protocol version: %d'
% version)
return cls(route_type, afi, instance)
def serialize(self, version=_DEFAULT_VERSION):
if version <= 3:
return struct.pack(self._HEADER_FMT, self.route_type)
elif version == 4:
return struct.pack(self._V4_HEADER_FMT,
self.afi, self.route_type, self.instance)
else:
raise ValueError(
'Unsupported Zebra protocol version: %d'
% version)
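# Example (illustrative sketch, kept as a comment): asking Zebra to
# redistribute routes of a given protocol.  For versions <= 3 only the route
# type is encoded; version 4 (FRRouting) also requires the AFI and instance.
# The ZEBRA_ROUTE_* constant and the AFI numbering are assumptions here.
#
#   body = ZebraRedistributeAdd(route_type=ZEBRA_ROUTE_STATIC)
#   body_v4 = ZebraRedistributeAdd(route_type=ZEBRA_ROUTE_STATIC,
#                                  afi=1, instance=0)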
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_ADD)
@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_ADD)
class ZebraRedistributeAdd(_ZebraRedistribute):
"""
Message body class for ZEBRA_REDISTRIBUTE_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_DELETE)
@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_DELETE)
class ZebraRedistributeDelete(_ZebraRedistribute):
"""
Message body class for ZEBRA_REDISTRIBUTE_DELETE.
"""
@six.add_metaclass(abc.ABCMeta)
class _ZebraRedistributeDefault(_ZebraMessageBody):
"""
Base class for ZEBRA_REDISTRIBUTE_DEFAULT_ADD and
ZEBRA_REDISTRIBUTE_DEFAULT_DELETE message body.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_DEFAULT_ADD)
@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_DEFAULT_ADD)
class ZebraRedistributeDefaultAdd(_ZebraRedistributeDefault):
"""
Message body class for ZEBRA_REDISTRIBUTE_DEFAULT_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_DEFAULT_DELETE)
@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_DEFAULT_DELETE)
class ZebraRedistributeDefaultDelete(_ZebraRedistributeDefault):
"""
Message body class for ZEBRA_REDISTRIBUTE_DEFAULT_DELETE.
"""
@six.add_metaclass(abc.ABCMeta)
class _ZebraIPNexthopLookup(_ZebraMessageBody):
"""
Base class for ZEBRA_IPV4_NEXTHOP_LOOKUP and
ZEBRA_IPV6_NEXTHOP_LOOKUP message body.
"""
# Zebra IPv4/v6 Nexthop Lookup message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthop Num |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthops (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_METRIC_FMT = '!I' # metric
METRIC_SIZE = struct.calcsize(_METRIC_FMT)
# Message type specific constants
ADDR_CLS = None # either addrconv.ipv4 or addrconv.ipv6
ADDR_LEN = None # IP address length in bytes
def __init__(self, addr, metric=None, nexthops=None):
super(_ZebraIPNexthopLookup, self).__init__()
assert ip.valid_ipv4(addr) or ip.valid_ipv6(addr)
self.addr = addr
self.metric = metric
nexthops = nexthops or []
for nexthop in nexthops:
assert isinstance(nexthop, _NextHop)
self.nexthops = nexthops
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
addr = cls.ADDR_CLS.bin_to_text(buf[:cls.ADDR_LEN])
rest = buf[cls.ADDR_LEN:]
metric = None
if rest:
# Note: Case for ZEBRA_IPV4_NEXTHOP_LOOKUP request
(metric,) = struct.unpack_from(cls._METRIC_FMT, rest)
rest = rest[cls.METRIC_SIZE:]
nexthops = None
if rest:
nexthops, rest = _parse_nexthops(rest, version)
return cls(addr, metric, nexthops)
def serialize(self, version=_DEFAULT_VERSION):
buf = self.ADDR_CLS.text_to_bin(self.addr)
if self.metric is None:
return buf
buf += struct.pack(self._METRIC_FMT, self.metric)
return buf + _serialize_nexthops(self.nexthops, version=version)
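# Example (illustrative sketch, kept as a comment): a nexthop lookup request
# carries only the address to resolve; serialize() above stops after the
# address when metric is None.  The reply parsed from Zebra additionally
# carries the metric and the nexthop list.
#
#   query = ZebraIPv4NexthopLookup(addr='192.0.2.10')
#   buf = query.serialize()   # four bytes: just the IPv4 address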
@_ZebraMessageBody.register_type(ZEBRA_IPV4_NEXTHOP_LOOKUP)
class ZebraIPv4NexthopLookup(_ZebraIPNexthopLookup):
"""
Message body class for ZEBRA_IPV4_NEXTHOP_LOOKUP.
"""
ADDR_CLS = addrconv.ipv4
ADDR_LEN = 4
@_ZebraMessageBody.register_type(ZEBRA_IPV6_NEXTHOP_LOOKUP)
class ZebraIPv6NexthopLookup(_ZebraIPNexthopLookup):
"""
Message body class for ZEBRA_IPV6_NEXTHOP_LOOKUP.
"""
ADDR_CLS = addrconv.ipv6
ADDR_LEN = 16
@six.add_metaclass(abc.ABCMeta)
class _ZebraIPImportLookup(_ZebraMessageBody):
"""
Base class for ZEBRA_IPV4_IMPORT_LOOKUP and
ZEBRA_IPV6_IMPORT_LOOKUP message body.
.. Note::
        The Zebra IPv4/v6 Import Lookup messages have an asymmetric structure.
        If the message was sent from the Zebra daemon, set 'from_zebra=True'
        when creating an instance of this class.
"""
# Zebra IPv4/v6 Import Lookup message body
# (Protocol Daemons -> Zebra Daemon):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Prefix Len |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (4 bytes or 16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Zebra IPv4/v6 Import Lookup message body
# (Zebra Daemons -> Protocol Daemon):
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (4 bytes or 16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthop Num |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthops (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_PREFIX_LEN_FMT = '!B' # prefix_len
PREFIX_LEN_SIZE = struct.calcsize(_PREFIX_LEN_FMT)
_METRIC_FMT = '!I' # metric
METRIC_SIZE = struct.calcsize(_METRIC_FMT)
# Message type specific constants
PREFIX_CLS = None # either addrconv.ipv4 or addrconv.ipv6
PREFIX_LEN = None # IP prefix length in bytes
def __init__(self, prefix, metric=None, nexthops=None,
from_zebra=False):
super(_ZebraIPImportLookup, self).__init__()
if not from_zebra:
assert ip.valid_ipv4(prefix) or ip.valid_ipv6(prefix)
else:
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
else:
assert ip.valid_ipv4(prefix) or ip.valid_ipv6(prefix)
self.prefix = prefix
self.metric = metric
nexthops = nexthops or []
for nexthop in nexthops:
assert isinstance(nexthop, _NextHop)
self.nexthops = nexthops
self.from_zebra = from_zebra
@classmethod
def parse_impl(cls, buf, version=_DEFAULT_VERSION, from_zebra=False):
if not from_zebra:
(prefix_len,) = struct.unpack_from(cls._PREFIX_LEN_FMT, buf)
rest = buf[cls.PREFIX_LEN_SIZE:]
prefix = cls.PREFIX_CLS.bin_to_text(rest[:cls.PREFIX_LEN])
return cls('%s/%d' % (prefix, prefix_len), from_zebra=False)
prefix = cls.PREFIX_CLS.bin_to_text(buf[:cls.PREFIX_LEN])
        rest = buf[cls.PREFIX_LEN:]
(metric,) = struct.unpack_from(cls._METRIC_FMT, rest)
rest = rest[cls.METRIC_SIZE:]
nexthops, rest = _parse_nexthops(rest, version)
return cls(prefix, metric, nexthops, from_zebra=True)
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
return cls.parse_impl(buf, version=version, from_zebra=False)
@classmethod
def parse_from_zebra(cls, buf, version=_DEFAULT_VERSION):
return cls.parse_impl(buf, version=version, from_zebra=True)
def serialize(self, version=_DEFAULT_VERSION):
if not self.from_zebra:
if ip.valid_ipv4(self.prefix) or ip.valid_ipv6(self.prefix):
prefix, prefix_len = self.prefix.split('/')
return struct.pack(
self._PREFIX_LEN_FMT,
int(prefix_len)) + self.PREFIX_CLS.text_to_bin(prefix)
else:
raise ValueError('Invalid prefix: %s' % self.prefix)
if ip.valid_ipv4(self.prefix) or ip.valid_ipv6(self.prefix):
buf = self.PREFIX_CLS.text_to_bin(self.prefix)
else:
raise ValueError('Invalid prefix: %s' % self.prefix)
buf += struct.pack(self._METRIC_FMT, self.metric)
return buf + _serialize_nexthops(self.nexthops, version=version)
@_ZebraMessageBody.register_type(ZEBRA_IPV4_IMPORT_LOOKUP)
class ZebraIPv4ImportLookup(_ZebraIPImportLookup):
"""
Message body class for ZEBRA_IPV4_IMPORT_LOOKUP.
"""
PREFIX_CLS = addrconv.ipv4
PREFIX_LEN = 4
@_ZebraMessageBody.register_type(ZEBRA_IPV6_IMPORT_LOOKUP)
class ZebraIPv6ImportLookup(_ZebraIPImportLookup):
"""
Message body class for ZEBRA_IPV6_IMPORT_LOOKUP.
"""
PREFIX_CLS = addrconv.ipv6
PREFIX_LEN = 16
# Note: Not implemented in quagga/zebra/zserv.c
# @_ZebraMessageBody.register_type(ZEBRA_INTERFACE_RENAME)
# class ZebraInterfaceRename(_ZebraMessageBody):
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_ROUTER_ID_ADD)
@_ZebraMessageBody.register_type(ZEBRA_ROUTER_ID_ADD)
class ZebraRouterIDAdd(_ZebraMessageBody):
"""
Message body class for ZEBRA_ROUTER_ID_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_ROUTER_ID_DELETE)
@_ZebraMessageBody.register_type(ZEBRA_ROUTER_ID_DELETE)
class ZebraRouterIDDelete(_ZebraMessageBody):
"""
Message body class for ZEBRA_ROUTER_ID_DELETE.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_ROUTER_ID_UPDATE)
@_ZebraMessageBody.register_type(ZEBRA_ROUTER_ID_UPDATE)
class ZebraRouterIDUpdate(_ZebraMessageBody):
"""
Message body class for ZEBRA_ROUTER_ID_UPDATE.
"""
# Zebra Router ID Update message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Prefix len |
# +-+-+-+-+-+-+-+-+
def __init__(self, family, prefix):
super(ZebraRouterIDUpdate, self).__init__()
self.family = family
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
self.prefix = prefix
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(family, prefix, _) = _parse_zebra_family_prefix(buf)
return cls(family, prefix)
def serialize(self, version=_DEFAULT_VERSION):
(self.family, # fixup
buf) = _serialize_zebra_family_prefix(self.prefix)
return buf
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_HELLO)
@_ZebraMessageBody.register_type(ZEBRA_HELLO)
class ZebraHello(_ZebraMessageBody):
"""
Message body class for ZEBRA_HELLO.
"""
# Zebra Hello message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Route Type | (Instance): v4(FRRouting) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!B' # route_type
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_V4_HEADER_FMT = '!BH' # route_type, instance
V4_HEADER_SIZE = struct.calcsize(_V4_HEADER_FMT)
def __init__(self, route_type, instance=None):
super(ZebraHello, self).__init__()
self.route_type = route_type
self.instance = instance
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
instance = None
if version <= 3:
(route_type,) = struct.unpack_from(cls._HEADER_FMT, buf)
elif version == 4:
(route_type,
instance) = struct.unpack_from(cls._V4_HEADER_FMT, buf)
else:
raise struct.error(
'Unsupported Zebra protocol version: %d'
% version)
return cls(route_type, instance)
def serialize(self, version=_DEFAULT_VERSION):
if version <= 3:
return struct.pack(self._HEADER_FMT, self.route_type)
elif version == 4:
return struct.pack(self._V4_HEADER_FMT,
self.route_type, self.instance)
else:
raise ValueError(
'Unsupported Zebra protocol version: %d'
% version)
@six.add_metaclass(abc.ABCMeta)
class _ZebraIPNexthopLookupMRib(_ZebraMessageBody):
"""
Base class for ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB (and
ZEBRA_IPV6_NEXTHOP_LOOKUP_MRIB) message body.
"""
# Zebra IPv4/v6 Nexthop Lookup MRIB message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Distance |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthop Num |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthops (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_DISTANCE_METRIC_FMT = '!BI' # distance, metric
DISTANCE_METRIC_SIZE = struct.calcsize(_DISTANCE_METRIC_FMT)
# Message type specific constants
ADDR_CLS = None # either addrconv.ipv4 or addrconv.ipv6
ADDR_LEN = None # IP address length in bytes
def __init__(self, addr, distance=None, metric=None, nexthops=None):
super(_ZebraIPNexthopLookupMRib, self).__init__()
assert ip.valid_ipv4(addr) or ip.valid_ipv6(addr)
self.addr = addr
self.distance = distance
self.metric = metric
nexthops = nexthops or []
for nexthop in nexthops:
assert isinstance(nexthop, _NextHop)
self.nexthops = nexthops
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
addr = cls.ADDR_CLS.bin_to_text(buf[:cls.ADDR_LEN])
rest = buf[cls.ADDR_LEN:]
if not rest:
return cls(addr)
(distance,
metric) = struct.unpack_from(cls._DISTANCE_METRIC_FMT, rest)
rest = rest[cls.DISTANCE_METRIC_SIZE:]
nexthops, rest = _parse_nexthops(rest, version)
return cls(addr, distance, metric, nexthops)
def serialize(self, version=_DEFAULT_VERSION):
buf = self.ADDR_CLS.text_to_bin(self.addr)
if self.distance is None or self.metric is None:
return buf
buf += struct.pack(
self._DISTANCE_METRIC_FMT, self.distance, self.metric)
return buf + _serialize_nexthops(self.nexthops, version=version)
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB)
@_ZebraMessageBody.register_type(ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB)
class ZebraIPv4NexthopLookupMRib(_ZebraIPNexthopLookupMRib):
"""
Message body class for ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB.
"""
ADDR_CLS = addrconv.ipv4
ADDR_LEN = 4
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_VRF_UNREGISTER)
@_ZebraMessageBody.register_type(ZEBRA_VRF_UNREGISTER)
class ZebraVrfUnregister(_ZebraMessageBody):
"""
Message body class for ZEBRA_VRF_UNREGISTER.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_LINK_PARAMS)
@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_LINK_PARAMS)
class ZebraInterfaceLinkParams(_ZebraMessageBody):
"""
Message body class for ZEBRA_INTERFACE_LINK_PARAMS.
"""
# Zebra Interface Link Parameters message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface Index |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface Link Parameters |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!I' # ifindex
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
# See InterfaceLinkParams class for Interface Link Parameters structure
def __init__(self, ifindex, link_params):
super(ZebraInterfaceLinkParams, self).__init__()
self.ifindex = ifindex
assert isinstance(link_params, InterfaceLinkParams)
self.link_params = link_params
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(ifindex,) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
link_params, rest = InterfaceLinkParams.parse(rest)
return cls(ifindex, link_params)
def serialize(self, version=_DEFAULT_VERSION):
buf = struct.pack(self._HEADER_FMT, self.ifindex)
return buf + self.link_params.serialize()
class _ZebraNexthopRegister(_ZebraMessageBody):
"""
Base class for ZEBRA_NEXTHOP_REGISTER and ZEBRA_NEXTHOP_UNREGISTER
message body.
"""
# Zebra Nexthop Register message body:
# (Repeat of RegisteredNexthop class)
def __init__(self, nexthops):
super(_ZebraNexthopRegister, self).__init__()
nexthops = nexthops or []
for nexthop in nexthops:
assert isinstance(nexthop, RegisteredNexthop)
self.nexthops = nexthops
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
nexthops = []
while buf:
nexthop, buf = RegisteredNexthop.parse(buf)
nexthops.append(nexthop)
return cls(nexthops)
def serialize(self, version=_DEFAULT_VERSION):
buf = b''
for nexthop in self.nexthops:
buf += nexthop.serialize()
return buf
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_NEXTHOP_REGISTER)
@_ZebraMessageBody.register_type(ZEBRA_NEXTHOP_REGISTER)
class ZebraNexthopRegister(_ZebraNexthopRegister):
"""
Message body class for ZEBRA_NEXTHOP_REGISTER.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_NEXTHOP_UNREGISTER)
@_ZebraMessageBody.register_type(ZEBRA_NEXTHOP_UNREGISTER)
class ZebraNexthopUnregister(_ZebraNexthopRegister):
"""
Message body class for ZEBRA_NEXTHOP_UNREGISTER.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_NEXTHOP_UPDATE)
@_ZebraMessageBody.register_type(ZEBRA_NEXTHOP_UPDATE)
class ZebraNexthopUpdate(_ZebraMessageBody):
"""
Message body class for ZEBRA_NEXTHOP_UPDATE.
"""
# Zebra IPv4/v6 Nexthop Update message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Distance) | v4(FRRouting v3.0 or later)
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthop Num |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nexthops (Variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_FAMILY_FMT = '!H' # family
FAMILY_SIZE = struct.calcsize(_FAMILY_FMT)
    _DISTANCE_FMT = '!B'  # distance
DISTANCE_SIZE = struct.calcsize(_DISTANCE_FMT)
_METRIC_FMT = '!I' # metric
METRIC_SIZE = struct.calcsize(_METRIC_FMT)
def __init__(self, family, prefix, distance=None, metric=None,
nexthops=None):
super(ZebraNexthopUpdate, self).__init__()
self.family = family
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
self.prefix = prefix
if _is_frr_version_ge(_FRR_VERSION_3_0):
assert distance is not None
self.distance = distance
assert metric is not None
self.metric = metric
nexthops = nexthops or []
for nexthop in nexthops:
assert isinstance(nexthop, _NextHop)
self.nexthops = nexthops
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(family,) = struct.unpack_from(cls._FAMILY_FMT, buf)
rest = buf[cls.FAMILY_SIZE:]
prefix, rest = _parse_ip_prefix(family, rest)
distance = None
if _is_frr_version_ge(_FRR_VERSION_3_0):
(distance,) = struct.unpack_from(cls._DISTANCE_FMT, rest)
rest = rest[cls.DISTANCE_SIZE:]
(metric,) = struct.unpack_from(cls._METRIC_FMT, rest)
rest = rest[cls.METRIC_SIZE:]
nexthops, rest = _parse_nexthops(rest, version)
return cls(family, prefix, distance, metric, nexthops)
def serialize(self, version=_DEFAULT_VERSION):
# fixup
if ip.valid_ipv4(self.prefix):
self.family = socket.AF_INET
elif ip.valid_ipv6(self.prefix):
self.family = socket.AF_INET6
else:
raise ValueError('Invalid prefix: %s' % self.prefix)
buf = struct.pack(self._FAMILY_FMT, self.family)
buf += _serialize_ip_prefix(self.prefix)
if _is_frr_version_ge(_FRR_VERSION_3_0):
buf += struct.pack(self._DISTANCE_FMT, self.distance)
buf += struct.pack(self._METRIC_FMT, self.metric)
return buf + _serialize_nexthops(self.nexthops, version=version)
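# Example (illustrative sketch, kept as a comment): the family field is
# recomputed from the prefix on serialization, and the distance byte is only
# emitted when the configured FRRouting version is 3.0 or later.
#
#   update = ZebraNexthopUpdate(
#       family=socket.AF_INET, prefix='192.0.2.0/24',
#       distance=10, metric=20, nexthops=[])
#   buf = update.serialize(version=4)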
class _ZebraInterfaceNbrAddress(_ZebraMessageBody):
"""
Base class for FRR_ZEBRA_INTERFACE_NBR_ADDRESS_* message body.
"""
# Zebra Interface Neighbor Address message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface index |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Prefix len |
# +-+-+-+-+-+-+-+-+
_HEADER_FMT = '!I' # ifindex
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
def __init__(self, ifindex, family, prefix):
super(_ZebraInterfaceNbrAddress, self).__init__()
self.ifindex = ifindex
self.family = family
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
self.prefix = prefix
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(ifindex,) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
(family, prefix, _) = _parse_zebra_family_prefix(rest)
return cls(ifindex, family, prefix)
def serialize(self, version=_DEFAULT_VERSION):
(self.family, # fixup
body_bin) = _serialize_zebra_family_prefix(self.prefix)
return struct.pack(self._HEADER_FMT, self.ifindex) + body_bin
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_NBR_ADDRESS_ADD)
class ZebraInterfaceNbrAddressAdd(_ZebraInterfaceNbrAddress):
"""
Message body class for FRR_ZEBRA_INTERFACE_NBR_ADDRESS_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_NBR_ADDRESS_DELETE)
class ZebraInterfaceNbrAddressDelete(_ZebraInterfaceNbrAddress):
"""
Message body class for FRR_ZEBRA_INTERFACE_NBR_ADDRESS_DELETE.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_BFD_DEST_UPDATE)
class ZebraInterfaceBfdDestinationUpdate(_ZebraMessageBody):
"""
Message body class for FRR_ZEBRA_INTERFACE_BFD_DEST_UPDATE.
"""
# Zebra Interface BFD Destination Update message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface index |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Dst Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Dst IPv4/v6 prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Dst Plen |
# +-+-+-+-+-+-+-+-+
# | Status |
# +-+-+-+-+-+-+-+-+
# | Src Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source IPv4/v6 prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Src Plen |
# +-+-+-+-+-+-+-+-+
_HEADER_FMT = '!I' # ifindex
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_STATUS_FMT = '!B' # status
STATUS_SIZE = struct.calcsize(_STATUS_FMT)
def __init__(self, ifindex, dst_family, dst_prefix, status,
src_family, src_prefix):
super(ZebraInterfaceBfdDestinationUpdate, self).__init__()
self.ifindex = ifindex
self.dst_family = dst_family
if isinstance(dst_prefix, (IPv4Prefix, IPv6Prefix)):
dst_prefix = dst_prefix.prefix
self.dst_prefix = dst_prefix
self.status = status
self.src_family = src_family
if isinstance(src_prefix, (IPv4Prefix, IPv6Prefix)):
src_prefix = src_prefix.prefix
self.src_prefix = src_prefix
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(ifindex,) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
(dst_family, dst_prefix,
rest) = _parse_zebra_family_prefix(rest)
(status,) = struct.unpack_from(cls._STATUS_FMT, rest)
rest = rest[cls.STATUS_SIZE:]
(src_family, src_prefix,
_) = _parse_zebra_family_prefix(rest)
return cls(ifindex, dst_family, dst_prefix, status,
src_family, src_prefix)
def serialize(self, version=_DEFAULT_VERSION):
(self.dst_family, # fixup
dst_bin) = _serialize_zebra_family_prefix(self.dst_prefix)
status_bin = struct.pack(
self._STATUS_FMT, self.status)
(self.src_family, # fixup
src_bin) = _serialize_zebra_family_prefix(self.src_prefix)
return struct.pack(
self._HEADER_FMT,
self.ifindex) + dst_bin + status_bin + src_bin
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IMPORT_ROUTE_REGISTER)
class ZebraImportRouteRegister(_ZebraNexthopRegister):
"""
Message body class for FRR_ZEBRA_IMPORT_ROUTE_REGISTER.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IMPORT_ROUTE_UNREGISTER)
class ZebraImportRouteUnregister(_ZebraNexthopRegister):
"""
Message body class for FRR_ZEBRA_IMPORT_ROUTE_UNREGISTER.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IMPORT_CHECK_UPDATE)
class ZebraImportCheckUpdate(ZebraNexthopUpdate):
"""
Message body class for FRR_ZEBRA_IMPORT_CHECK_UPDATE.
"""
class _ZebraBfdDestination(_ZebraMessageBody):
"""
Base class for FRR_ZEBRA_BFD_DEST_REGISTER and
FRR_ZEBRA_BFD_DEST_UPDATE message body.
"""
# Zebra BFD Destination message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | PID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Destination Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Destination IPv4/v6 prefix (4 bytes or 16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Min RX Timer |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Min TX Timer |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Detect Mult |
# +-+-+-+-+-+-+-+-+
# | Multi Hop |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source IPv4/v6 prefix (4 bytes or 16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (MultiHopCnt) | if Multi Hop enabled
# +-+-+-+-+-+-+-+-+
# | (IFName Len) | if Multi Hop disabled
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (Interface Name (Variable)) if Multi Hop disabled |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!I' # pid
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_FAMILY_FMT = '!H'
FAMILY_SIZE = struct.calcsize(_FAMILY_FMT)
_BODY_FMT = '!IIBB' # min_rx_timer, min_tx_timer, detect_mult, multi_hop
BODY_SIZE = struct.calcsize(_BODY_FMT)
_FOOTER_FMT = '!B' # multi_hop_count or ifname_len
FOOTER_SIZE = struct.calcsize(_FOOTER_FMT)
def __init__(self, pid, dst_family, dst_prefix,
min_rx_timer, min_tx_timer, detect_mult,
multi_hop, src_family, src_prefix,
multi_hop_count=None, ifname=None):
super(_ZebraBfdDestination, self).__init__()
self.pid = pid
self.dst_family = dst_family
assert ip.valid_ipv4(dst_prefix) or ip.valid_ipv6(dst_prefix)
self.dst_prefix = dst_prefix
self.min_rx_timer = min_rx_timer
self.min_tx_timer = min_tx_timer
self.detect_mult = detect_mult
self.multi_hop = multi_hop
self.src_family = src_family
assert ip.valid_ipv4(src_prefix) or ip.valid_ipv6(src_prefix)
self.src_prefix = src_prefix
self.multi_hop_count = multi_hop_count
self.ifname = ifname
@classmethod
def _parse_family_prefix(cls, buf):
(family,) = struct.unpack_from(cls._FAMILY_FMT, buf)
rest = buf[cls.FAMILY_SIZE:]
if socket.AF_INET == family:
return family, addrconv.ipv4.bin_to_text(rest[:4]), rest[4:]
elif socket.AF_INET6 == family:
return family, addrconv.ipv6.bin_to_text(rest[:16]), rest[16:]
raise struct.error('Unsupported family: %d' % family)
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(pid,) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
(dst_family, dst_prefix,
rest) = cls._parse_family_prefix(rest)
(min_rx_timer, min_tx_timer, detect_mult,
multi_hop) = struct.unpack_from(cls._BODY_FMT, rest)
rest = rest[cls.BODY_SIZE:]
(src_family, src_prefix,
rest) = cls._parse_family_prefix(rest)
multi_hop_count = None
ifname = None
if multi_hop:
(multi_hop_count,) = struct.unpack_from(cls._FOOTER_FMT, rest)
else:
(ifname_len,) = struct.unpack_from(cls._FOOTER_FMT, rest)
ifname_bin = rest[cls.FOOTER_SIZE:cls.FOOTER_SIZE + ifname_len]
ifname = str(six.text_type(ifname_bin.strip(b'\x00'), 'ascii'))
return cls(pid, dst_family, dst_prefix,
min_rx_timer, min_tx_timer, detect_mult,
multi_hop, src_family, src_prefix,
multi_hop_count, ifname)
def _serialize_family_prefix(self, prefix):
if ip.valid_ipv4(prefix):
family = socket.AF_INET
return (family,
struct.pack(self._FAMILY_FMT, family)
+ addrconv.ipv4.text_to_bin(prefix))
elif ip.valid_ipv6(prefix):
family = socket.AF_INET6
return (family,
struct.pack(self._FAMILY_FMT, family)
+ addrconv.ipv6.text_to_bin(prefix))
raise ValueError('Invalid prefix: %s' % prefix)
def serialize(self, version=_DEFAULT_VERSION):
(self.dst_family, # fixup
dst_bin) = self._serialize_family_prefix(self.dst_prefix)
body_bin = struct.pack(
self._BODY_FMT,
self.min_rx_timer, self.min_tx_timer, self.detect_mult,
self.multi_hop)
(self.src_family, # fixup
src_bin) = self._serialize_family_prefix(self.src_prefix)
if self.multi_hop:
footer_bin = struct.pack(
self._FOOTER_FMT, self.multi_hop_count)
else:
ifname_bin = self.ifname.encode('ascii')
footer_bin = struct.pack(
self._FOOTER_FMT, len(ifname_bin)) + ifname_bin
return struct.pack(
self._HEADER_FMT,
self.pid) + dst_bin + body_bin + src_bin + footer_bin
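# Example (illustrative sketch, kept as a comment): registering a single-hop
# BFD session.  With multi_hop=0 the footer carries the interface name; with a
# non-zero multi_hop it carries multi_hop_count instead.  Both family fields
# are recomputed from the prefixes during serialization.
#
#   body = ZebraBfdDestinationRegister(
#       pid=1234, dst_family=socket.AF_INET, dst_prefix='192.0.2.1',
#       min_rx_timer=300, min_tx_timer=300, detect_mult=3,
#       multi_hop=0, src_family=socket.AF_INET, src_prefix='192.0.2.2',
#       ifname='eth0')
#   buf = body.serialize()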
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_BFD_DEST_REGISTER)
class ZebraBfdDestinationRegister(_ZebraBfdDestination):
"""
Message body class for FRR_ZEBRA_BFD_DEST_REGISTER.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_BFD_DEST_DEREGISTER)
class ZebraBfdDestinationDeregister(_ZebraMessageBody):
"""
Message body class for FRR_ZEBRA_BFD_DEST_DEREGISTER.
"""
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | PID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Destination IPv4/v6 prefix (4 bytes or 16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multi Hop |
# +-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source IPv4/v6 prefix (4 bytes or 16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (MultiHopCnt) | if Multi Hop enabled
# +-+-+-+-+-+-+-+-+
# | (IF Name Len) | if Multi Hop disabled
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | (IF Name (Variable)) if Multi Hop disabled |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!I' # pid
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_FAMILY_FMT = '!H'
FAMILY_SIZE = struct.calcsize(_FAMILY_FMT)
_BODY_FMT = '!B' # multi_hop
BODY_SIZE = struct.calcsize(_BODY_FMT)
_FOOTER_FMT = '!B' # multi_hop_count or ifname_len
FOOTER_SIZE = struct.calcsize(_FOOTER_FMT)
def __init__(self, pid, dst_family, dst_prefix,
multi_hop, src_family, src_prefix,
multi_hop_count=None, ifname=None):
super(ZebraBfdDestinationDeregister, self).__init__()
self.pid = pid
self.dst_family = dst_family
assert ip.valid_ipv4(dst_prefix) or ip.valid_ipv6(dst_prefix)
self.dst_prefix = dst_prefix
self.multi_hop = multi_hop
self.src_family = src_family
assert ip.valid_ipv4(src_prefix) or ip.valid_ipv6(src_prefix)
self.src_prefix = src_prefix
self.multi_hop_count = multi_hop_count
self.ifname = ifname
@classmethod
def _parse_family_prefix(cls, buf):
(family,) = struct.unpack_from(cls._FAMILY_FMT, buf)
rest = buf[cls.FAMILY_SIZE:]
if socket.AF_INET == family:
return family, addrconv.ipv4.bin_to_text(rest[:4]), rest[4:]
elif socket.AF_INET6 == family:
return family, addrconv.ipv6.bin_to_text(rest[:16]), rest[16:]
raise struct.error('Unsupported family: %d' % family)
@classmethod
def parse(cls, buf, version=_DEFAULT_VERSION):
(pid,) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
(dst_family, dst_prefix,
rest) = cls._parse_family_prefix(rest)
(multi_hop,) = struct.unpack_from(cls._BODY_FMT, rest)
rest = rest[cls.BODY_SIZE:]
(src_family, src_prefix,
rest) = cls._parse_family_prefix(rest)
multi_hop_count = None
ifname = None
if multi_hop:
(multi_hop_count,) = struct.unpack_from(cls._FOOTER_FMT, rest)
else:
(ifname_len,) = struct.unpack_from(cls._FOOTER_FMT, rest)
ifname_bin = rest[cls.FOOTER_SIZE:cls.FOOTER_SIZE + ifname_len]
ifname = str(six.text_type(ifname_bin.strip(b'\x00'), 'ascii'))
return cls(pid, dst_family, dst_prefix,
multi_hop, src_family, src_prefix,
multi_hop_count, ifname)
def _serialize_family_prefix(self, prefix):
if ip.valid_ipv4(prefix):
family = socket.AF_INET
return (family,
struct.pack(self._FAMILY_FMT, family)
+ addrconv.ipv4.text_to_bin(prefix))
elif ip.valid_ipv6(prefix):
family = socket.AF_INET6
return (family,
struct.pack(self._FAMILY_FMT, family)
+ addrconv.ipv6.text_to_bin(prefix))
raise ValueError('Invalid prefix: %s' % prefix)
def serialize(self, version=_DEFAULT_VERSION):
(self.dst_family, # fixup
dst_bin) = self._serialize_family_prefix(self.dst_prefix)
body_bin = struct.pack(self._BODY_FMT, self.multi_hop)
(self.src_family, # fixup
src_bin) = self._serialize_family_prefix(self.src_prefix)
if self.multi_hop:
footer_bin = struct.pack(
self._FOOTER_FMT, self.multi_hop_count)
else:
ifname_bin = self.ifname.encode('ascii')
footer_bin = struct.pack(
self._FOOTER_FMT, len(ifname_bin)) + ifname_bin
return struct.pack(
self._HEADER_FMT,
self.pid) + dst_bin + body_bin + src_bin + footer_bin
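# Illustrative sketch (not part of the original module): building and serializing
# a single-hop BFD destination message. All values below are assumptions for
# demonstration; socket.AF_INET is shown resolving to 2, which is platform-specific.
#
#   body = ZebraBfdDestinationDeregister(
#       pid=1234,
#       dst_family=socket.AF_INET, dst_prefix='10.0.0.1',
#       multi_hop=0,
#       src_family=socket.AF_INET, src_prefix='10.0.0.2',
#       ifname='eth0')
#   body.serialize()
#   # pid(4) + family(2) + dst(4) + multi_hop(1) + family(2) + src(4)
#   # + ifname_len(1) + b'eth0'(4) = 22 bytes on the wire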
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_BFD_DEST_UPDATE)
class ZebraBfdDestinationUpdate(_ZebraBfdDestination):
"""
Message body class for FRR_ZEBRA_BFD_DEST_UPDATE.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_BFD_DEST_REPLAY)
class ZebraBfdDestinationReply(_ZebraMessageBody):
"""
Message body class for FRR_ZEBRA_BFD_DEST_REPLAY.
"""
class _ZebraRedistributeIPv4(_ZebraIPRoute):
"""
Base class for FRR_ZEBRA_REDISTRIBUTE_IPV4_* message body.
"""
_FAMILY = socket.AF_INET
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_IPV4_ADD)
class ZebraRedistributeIPv4Add(_ZebraRedistributeIPv4):
"""
Message body class for FRR_ZEBRA_REDISTRIBUTE_IPV4_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_IPV4_DEL)
class ZebraRedistributeIPv4Delete(_ZebraRedistributeIPv4):
"""
Message body class for FRR_ZEBRA_REDISTRIBUTE_IPV4_DEL.
"""
class _ZebraRedistributeIPv6(_ZebraIPRoute):
"""
Base class for FRR_ZEBRA_REDISTRIBUTE_IPV6_* message body.
"""
_FAMILY = socket.AF_INET6
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_IPV6_ADD)
class ZebraRedistributeIPv6Add(_ZebraRedistributeIPv6):
"""
Message body class for FRR_ZEBRA_REDISTRIBUTE_IPV6_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_REDISTRIBUTE_IPV6_DEL)
class ZebraRedistributeIPv6Delete(_ZebraRedistributeIPv6):
"""
Message body class for FRR_ZEBRA_REDISTRIBUTE_IPV6_DEL.
"""
class _ZebraVrf(_ZebraMessageBody):
"""
Base class for FRR_ZEBRA_VRF_ADD and FRR_ZEBRA_VRF_DELETE message body.
"""
# Zebra VRF Add/Delete message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | VRF Name (VRF_NAMSIZ bytes length) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!%ds' % VRF_NAMSIZ
def __init__(self, vrf_name):
super(_ZebraVrf, self).__init__()
self.vrf_name = vrf_name
@classmethod
def parse(cls, buf, version=_DEFAULT_FRR_VERSION):
vrf_name_bin = buf[:VRF_NAMSIZ]
vrf_name = str(six.text_type(vrf_name_bin.strip(b'\x00'), 'ascii'))
return cls(vrf_name)
def serialize(self, version=_DEFAULT_FRR_VERSION):
return struct.pack(self._HEADER_FMT, self.vrf_name.encode('ascii'))
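# Illustrative note (assumption, not in the original source): the VRF name is a
# fixed-width, NUL-padded field, so the ZebraVrfAdd / ZebraVrfDelete subclasses
# below round-trip short names through serialize() and parse():
#
#   buf = ZebraVrfAdd(vrf_name='blue').serialize()  # b'blue' + NUL padding to VRF_NAMSIZ
#   ZebraVrfAdd.parse(buf).vrf_name                  # -> 'blue'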
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_VRF_ADD)
class ZebraVrfAdd(_ZebraVrf):
"""
Message body class for FRR_ZEBRA_VRF_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_VRF_DELETE)
class ZebraVrfDelete(_ZebraVrf):
"""
Message body class for FRR_ZEBRA_VRF_DELETE.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_VRF_UPDATE)
class ZebraInterfaceVrfUpdate(_ZebraMessageBody):
"""
Message body class for FRR_ZEBRA_INTERFACE_VRF_UPDATE.
"""
# Zebra Interface VRF Update message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface Index |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | VRF ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!IH' # ifindex, vrf_id
def __init__(self, ifindex, vrf_id):
super(ZebraInterfaceVrfUpdate, self).__init__()
self.ifindex = ifindex
self.vrf_id = vrf_id
@classmethod
def parse(cls, buf, version=_DEFAULT_FRR_VERSION):
(ifindex, vrf_id) = struct.unpack_from(cls._HEADER_FMT, buf)
return cls(ifindex, vrf_id)
def serialize(self, version=_DEFAULT_FRR_VERSION):
return struct.pack(self._HEADER_FMT, self.ifindex, self.vrf_id)
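# Example (illustrative values only): the VRF update body is a fixed 6-byte record.
#
#   ZebraInterfaceVrfUpdate(ifindex=3, vrf_id=1).serialize()
#   # -> b'\x00\x00\x00\x03\x00\x01'  (4-byte ifindex, 2-byte VRF ID)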
class _ZebraBfdClient(_ZebraMessageBody):
"""
Base class for FRR_ZEBRA_BFD_CLIENT_*.
"""
# Zebra BFD Client message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | PID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!I' # pid
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
def __init__(self, pid):
super(_ZebraBfdClient, self).__init__()
self.pid = pid
@classmethod
def parse(cls, buf, version=_DEFAULT_FRR_VERSION):
(pid,) = struct.unpack_from(cls._HEADER_FMT, buf)
return cls(pid)
def serialize(self, version=_DEFAULT_FRR_VERSION):
return struct.pack(self._HEADER_FMT, self.pid)
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_BFD_CLIENT_REGISTER)
class ZebraBfdClientRegister(_ZebraBfdClient):
"""
Message body class for FRR_ZEBRA_BFD_CLIENT_REGISTER.
"""
class _ZebraInterfaceRadv(_ZebraMessageBody):
"""
Base class for FRR_ZEBRA_INTERFACE_*_RADV message body.
"""
# Zebra interface Router Advertisement message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface Index |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | RA Interval |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!II' # ifindex, interval
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
def __init__(self, ifindex, interval):
super(_ZebraInterfaceRadv, self).__init__()
self.ifindex = ifindex
self.interval = interval
@classmethod
def parse(cls, buf, version=_DEFAULT_FRR_VERSION):
(ifindex, interval,) = struct.unpack_from(cls._HEADER_FMT, buf)
return cls(ifindex, interval)
def serialize(self, version=_DEFAULT_FRR_VERSION):
return struct.pack(self._HEADER_FMT, self.ifindex, self.interval)
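# Example (illustrative values only): the RADV enable/disable subclasses below
# serialize to 8 bytes -- a 4-byte interface index followed by a 4-byte RA interval.
#
#   _ZebraInterfaceRadv(ifindex=2, interval=10000).serialize()
#   # -> b'\x00\x00\x00\x02\x00\x00\x27\x10'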
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_ENABLE_RADV)
class ZebraInterfaceEnableRadv(_ZebraInterfaceRadv):
"""
Message body class for FRR_ZEBRA_INTERFACE_ENABLE_RADV.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_INTERFACE_DISABLE_RADV)
class ZebraInterfaceDisableRadv(_ZebraInterfaceRadv):
"""
Message body class for FRR_ZEBRA_INTERFACE_DISABLE_RADV.
"""
class _ZebraMplsLabels(_ZebraMessageBody):
"""
Base class for ZEBRA_MPLS_LABELS_* message body.
"""
# Zebra MPLS Labels message body:
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Route Type |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Family |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv4/v6 Prefix (4 bytes/16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Prefix Len |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Gate IPv4/v6 Address (4 bytes/16 bytes) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Interface Index: v4(FRRouting v3.0 or later) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Distance |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | In Label |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Out Label |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
_HEADER_FMT = '!B' # route_type
HEADER_SIZE = struct.calcsize(_HEADER_FMT)
_FAMILY_FMT = '!I'
FAMILY_SIZE = struct.calcsize(_FAMILY_FMT)
_IPV4_PREFIX_FMT = '!4sB' # prefix, prefix_len
_IPV6_PREFIX_FMT = '!16sB'
IPV4_PREFIX_SIZE = struct.calcsize(_IPV4_PREFIX_FMT)
IPV6_PREFIX_SIZE = struct.calcsize(_IPV6_PREFIX_FMT)
_FAMILY_IPV4_PREFIX_FMT = '!I4sB'
_FAMILY_IPV6_PREFIX_FMT = '!I16sB'
_IFINDEX_FMT = '!I'
IFINDEX_SIZE = struct.calcsize(_IFINDEX_FMT)
_BODY_FMT = '!BII' # distance, in_label, out_label
def __init__(self, route_type, family, prefix, gate_addr, ifindex=None,
distance=None, in_label=None, out_label=None):
super(_ZebraMplsLabels, self).__init__()
self.route_type = route_type
self.family = family
if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
prefix = prefix.prefix
self.prefix = prefix
assert ip.valid_ipv4(gate_addr) or ip.valid_ipv6(gate_addr)
self.gate_addr = gate_addr
if _is_frr_version_ge(_FRR_VERSION_3_0):
assert ifindex is not None
self.ifindex = ifindex
assert distance is not None
self.distance = distance
assert in_label is not None
self.in_label = in_label
assert out_label is not None
self.out_label = out_label
@classmethod
def _parse_family_prefix(cls, buf):
(family,) = struct.unpack_from(cls._FAMILY_FMT, buf)
rest = buf[cls.FAMILY_SIZE:]
if socket.AF_INET == family:
(prefix, p_len) = struct.unpack_from(cls._IPV4_PREFIX_FMT, rest)
prefix = '%s/%d' % (addrconv.ipv4.bin_to_text(prefix), p_len)
rest = rest[cls.IPV4_PREFIX_SIZE:]
elif socket.AF_INET6 == family:
(prefix, p_len) = struct.unpack_from(cls._IPV6_PREFIX_FMT, rest)
prefix = '%s/%d' % (addrconv.ipv6.bin_to_text(prefix), p_len)
rest = rest[cls.IPV6_PREFIX_SIZE:]
else:
raise struct.error('Unsupported family: %d' % family)
return family, prefix, rest
@classmethod
def parse(cls, buf, version=_DEFAULT_FRR_VERSION):
(route_type,) = struct.unpack_from(cls._HEADER_FMT, buf)
rest = buf[cls.HEADER_SIZE:]
(family, prefix, rest) = cls._parse_family_prefix(rest)
if family == socket.AF_INET:
gate_addr = addrconv.ipv4.bin_to_text(rest[:4])
rest = rest[4:]
elif family == socket.AF_INET6:
gate_addr = addrconv.ipv6.bin_to_text(rest[:16])
rest = rest[16:]
else:
raise struct.error('Unsupported family: %d' % family)
ifindex = None
if _is_frr_version_ge(_FRR_VERSION_3_0):
(ifindex,) = struct.unpack_from(cls._IFINDEX_FMT, rest)
rest = rest[cls.IFINDEX_SIZE:]
(distance, in_label,
out_label) = struct.unpack_from(cls._BODY_FMT, rest)
return cls(route_type, family, prefix, gate_addr, ifindex,
distance, in_label, out_label)
def _serialize_family_prefix(self, prefix):
if ip.valid_ipv4(prefix):
family = socket.AF_INET # fixup
prefix_addr, prefix_num = prefix.split('/')
return family, struct.pack(
self._FAMILY_IPV4_PREFIX_FMT,
family,
addrconv.ipv4.text_to_bin(prefix_addr),
int(prefix_num))
elif ip.valid_ipv6(prefix):
family = socket.AF_INET6 # fixup
prefix_addr, prefix_num = prefix.split('/')
return family, struct.pack(
self._FAMILY_IPV6_PREFIX_FMT,
family,
addrconv.ipv6.text_to_bin(prefix_addr),
int(prefix_num))
raise ValueError('Invalid prefix: %s' % prefix)
def serialize(self, version=_DEFAULT_FRR_VERSION):
(self.family, # fixup
prefix_bin) = self._serialize_family_prefix(self.prefix)
if self.family == socket.AF_INET:
gate_addr_bin = addrconv.ipv4.text_to_bin(self.gate_addr)
elif self.family == socket.AF_INET6:
gate_addr_bin = addrconv.ipv6.text_to_bin(self.gate_addr)
else:
raise ValueError('Unsupported family: %d' % self.family)
body_bin = b''
if _is_frr_version_ge(_FRR_VERSION_3_0):
body_bin = struct.pack(self._IFINDEX_FMT, self.ifindex)
body_bin += struct.pack(
self._BODY_FMT, self.distance, self.in_label, self.out_label)
return struct.pack(
self._HEADER_FMT,
self.route_type) + prefix_bin + gate_addr_bin + body_bin
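# Illustrative sketch (assumed values, not from the original source): an IPv4
# label binding as sent to FRRouting 3.0 or later, where the interface index is
# included. FRR_ZEBRA_ROUTE_STATIC is assumed to be a route-type constant defined
# earlier in this module; any valid zebra route type value works here.
#
#   ZebraMplsLabelsAdd(
#       route_type=FRR_ZEBRA_ROUTE_STATIC,
#       family=socket.AF_INET,
#       prefix='10.0.0.0/24', gate_addr='192.168.1.1',
#       ifindex=2, distance=1, in_label=100, out_label=200).serialize()
#   # route_type(1) + family(4) + prefix(4) + prefix_len(1) + gateway(4)
#   # + ifindex(4) + distance(1) + in_label(4) + out_label(4) = 27 bytes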
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_MPLS_LABELS_ADD)
class ZebraMplsLabelsAdd(_ZebraMplsLabels):
"""
Message body class for FRR_ZEBRA_MPLS_LABELS_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_MPLS_LABELS_DELETE)
class ZebraMplsLabelsDelete(_ZebraMplsLabels):
"""
Message body class for FRR_ZEBRA_MPLS_LABELS_DELETE.
"""
class _ZebraIPv4Nexthop(_ZebraIPRoute):
"""
Base class for FRR_ZEBRA_IPV4_NEXTHOP_* message body.
"""
_FAMILY = socket.AF_INET
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV4_NEXTHOP_ADD)
class ZebraIPv4NexthopAdd(_ZebraIPv4Nexthop):
"""
Message body class for FRR_ZEBRA_IPV4_NEXTHOP_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV4_NEXTHOP_DELETE)
class ZebraIPv4NexthopDelete(_ZebraIPv4Nexthop):
"""
Message body class for FRR_ZEBRA_IPV4_NEXTHOP_DELETE.
"""
class _ZebraIPv6Nexthop(_ZebraIPRoute):
"""
Base class for FRR_ZEBRA_IPV6_NEXTHOP_* message body.
"""
_FAMILY = socket.AF_INET6
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV6_NEXTHOP_ADD)
class ZebraIPv6NexthopAdd(_ZebraIPv6Nexthop):
"""
Message body class for FRR_ZEBRA_IPV6_NEXTHOP_ADD.
"""
@_FrrZebraMessageBody.register_type(FRR_ZEBRA_IPV6_NEXTHOP_DELETE)
class ZebraIPv6NexthopDelete(_ZebraIPv6Nexthop):
"""
Message body class for FRR_ZEBRA_IPV6_NEXTHOP_DELETE.
"""
|
cytec/SickRage
|
refs/heads/master
|
lib/requests/packages/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
|
ijuma/kafka
|
refs/heads/trunk
|
tests/kafkatest/tests/client/quota_test.py
|
9
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.test import Test
from ducktape.mark import matrix, parametrize
from ducktape.mark.resource import cluster
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.performance import ProducerPerformanceService
from kafkatest.services.console_consumer import ConsoleConsumer
class QuotaConfig(object):
CLIENT_ID = 'client-id'
USER = 'user'
USER_CLIENT = '(user, client-id)'
LARGE_QUOTA = 1000 * 1000 * 1000
USER_PRINCIPAL = 'CN=systemtest'
def __init__(self, quota_type, override_quota, kafka):
if quota_type == QuotaConfig.CLIENT_ID:
if override_quota:
self.client_id = 'overridden_id'
self.producer_quota = 3750000
self.consumer_quota = 3000000
self.configure_quota(kafka, self.producer_quota, self.consumer_quota, ['clients', self.client_id])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['clients', None])
else:
self.client_id = 'default_id'
self.producer_quota = 2500000
self.consumer_quota = 2000000
self.configure_quota(kafka, self.producer_quota, self.consumer_quota, ['clients', None])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['clients', 'overridden_id'])
elif quota_type == QuotaConfig.USER:
if override_quota:
self.client_id = 'some_id'
self.producer_quota = 3750000
self.consumer_quota = 3000000
self.configure_quota(kafka, self.producer_quota, self.consumer_quota, ['users', QuotaConfig.USER_PRINCIPAL])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['users', None])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['clients', self.client_id])
else:
self.client_id = 'some_id'
self.producer_quota = 2500000
self.consumer_quota = 2000000
self.configure_quota(kafka, self.producer_quota, self.consumer_quota, ['users', None])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['clients', None])
elif quota_type == QuotaConfig.USER_CLIENT:
if override_quota:
self.client_id = 'overridden_id'
self.producer_quota = 3750000
self.consumer_quota = 3000000
self.configure_quota(kafka, self.producer_quota, self.consumer_quota, ['users', QuotaConfig.USER_PRINCIPAL, 'clients', self.client_id])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['users', QuotaConfig.USER_PRINCIPAL, 'clients', None])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['users', None])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['clients', self.client_id])
else:
self.client_id = 'default_id'
self.producer_quota = 2500000
self.consumer_quota = 2000000
self.configure_quota(kafka, self.producer_quota, self.consumer_quota, ['users', None, 'clients', None])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['users', None])
self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['clients', None])
def configure_quota(self, kafka, producer_byte_rate, consumer_byte_rate, entity_args):
node = kafka.nodes[0]
cmd = "%s --zookeeper %s --alter --add-config producer_byte_rate=%d,consumer_byte_rate=%d" % \
(kafka.path.script("kafka-configs.sh", node), kafka.zk.connect_setting(), producer_byte_rate, consumer_byte_rate)
cmd += " --entity-type " + entity_args[0] + self.entity_name_opt(entity_args[1])
if len(entity_args) > 2:
cmd += " --entity-type " + entity_args[2] + self.entity_name_opt(entity_args[3])
node.account.ssh(cmd)
def entity_name_opt(self, name):
return " --entity-default" if name is None else " --entity-name " + name
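# For reference, configure_quota() above shells out to kafka-configs.sh. For the
# overridden client-id case it effectively runs (ZooKeeper address is illustrative):
#
#   kafka-configs.sh --zookeeper zk:2181 --alter \
#       --add-config producer_byte_rate=3750000,consumer_byte_rate=3000000 \
#       --entity-type clients --entity-name overridden_id
#
# followed by a second invocation with --entity-default that gives every other
# client an effectively unlimited quota (LARGE_QUOTA).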
class QuotaTest(Test):
"""
These tests verify that quotas provide the expected functionality -- they run
a producer, broker, and consumer with different client-id and quota configurations
and check that the observed throughput is close to the value we expect.
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(QuotaTest, self).__init__(test_context=test_context)
self.topic = 'test_topic'
self.logger.info('use topic ' + self.topic)
self.maximum_client_deviation_percentage = 100.0
self.maximum_broker_deviation_percentage = 5.0
self.num_records = 50000
self.record_size = 3000
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk,
security_protocol='SSL', authorizer_class_name='',
interbroker_security_protocol='SSL',
topics={self.topic: {'partitions': 6, 'replication-factor': 1, 'configs': {'min.insync.replicas': 1}}},
jmx_object_names=['kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec',
'kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec'],
jmx_attributes=['OneMinuteRate'])
self.num_producers = 1
self.num_consumers = 2
def setUp(self):
self.zk.start()
self.kafka.start()
def min_cluster_size(self):
"""Override this since we're adding services outside of the constructor"""
return super(QuotaTest, self).min_cluster_size() + self.num_producers + self.num_consumers
@cluster(num_nodes=5)
@matrix(quota_type=[QuotaConfig.CLIENT_ID, QuotaConfig.USER, QuotaConfig.USER_CLIENT], override_quota=[True, False])
@parametrize(quota_type=QuotaConfig.CLIENT_ID, consumer_num=2)
def test_quota(self, quota_type, override_quota=True, producer_num=1, consumer_num=1):
self.quota_config = QuotaConfig(quota_type, override_quota, self.kafka)
producer_client_id = self.quota_config.client_id
consumer_client_id = self.quota_config.client_id
# Produce all messages
producer = ProducerPerformanceService(
self.test_context, producer_num, self.kafka,
topic=self.topic, num_records=self.num_records, record_size=self.record_size, throughput=-1, client_id=producer_client_id,
jmx_object_names=['kafka.producer:type=producer-metrics,client-id=%s' % producer_client_id], jmx_attributes=['outgoing-byte-rate'])
producer.run()
# Consume all messages
consumer = ConsoleConsumer(self.test_context, consumer_num, self.kafka, self.topic,
consumer_timeout_ms=60000, client_id=consumer_client_id,
jmx_object_names=['kafka.consumer:type=consumer-fetch-manager-metrics,client-id=%s' % consumer_client_id],
jmx_attributes=['bytes-consumed-rate'])
consumer.run()
for idx, messages in consumer.messages_consumed.iteritems():
assert len(messages) > 0, "consumer %d didn't consume any message before timeout" % idx
success, msg = self.validate(self.kafka, producer, consumer)
assert success, msg
def validate(self, broker, producer, consumer):
"""
For each client_id we validate that:
1) number of consumed messages equals number of produced messages
2) maximum_producer_throughput <= producer_quota * (1 + maximum_client_deviation_percentage/100)
3) maximum_broker_byte_in_rate <= producer_quota * (1 + maximum_broker_deviation_percentage/100)
4) maximum_consumer_throughput <= consumer_quota * (1 + maximum_client_deviation_percentage/100)
5) maximum_broker_byte_out_rate <= consumer_quota * (1 + maximum_broker_deviation_percentage/100)
"""
success = True
msg = ''
self.kafka.read_jmx_output_all_nodes()
# validate that number of consumed messages equals number of produced messages
produced_num = sum([value['records'] for value in producer.results])
consumed_num = sum([len(value) for value in consumer.messages_consumed.values()])
self.logger.info('producer produced %d messages' % produced_num)
self.logger.info('consumer consumed %d messages' % consumed_num)
if produced_num != consumed_num:
success = False
msg += "number of produced messages %d doesn't equal number of consumed messages %d" % (produced_num, consumed_num)
# validate that maximum_producer_throughput <= producer_quota * (1 + maximum_client_deviation_percentage/100)
producer_attribute_name = 'kafka.producer:type=producer-metrics,client-id=%s:outgoing-byte-rate' % producer.client_id
producer_maximum_bps = producer.maximum_jmx_value[producer_attribute_name]
producer_quota_bps = self.quota_config.producer_quota
self.logger.info('producer has maximum throughput %.2f bps with producer quota %.2f bps' % (producer_maximum_bps, producer_quota_bps))
if producer_maximum_bps > producer_quota_bps*(self.maximum_client_deviation_percentage/100+1):
success = False
msg += 'maximum producer throughput %.2f bps exceeded producer quota %.2f bps by more than %.1f%%' % \
(producer_maximum_bps, producer_quota_bps, self.maximum_client_deviation_percentage)
# validate that maximum_broker_byte_in_rate <= producer_quota * (1 + maximum_broker_deviation_percentage/100)
broker_byte_in_attribute_name = 'kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec:OneMinuteRate'
broker_maximum_byte_in_bps = broker.maximum_jmx_value[broker_byte_in_attribute_name]
self.logger.info('broker has maximum byte-in rate %.2f bps with producer quota %.2f bps' %
(broker_maximum_byte_in_bps, producer_quota_bps))
if broker_maximum_byte_in_bps > producer_quota_bps*(self.maximum_broker_deviation_percentage/100+1):
success = False
msg += 'maximum broker byte-in rate %.2f bps exceeded producer quota %.2f bps by more than %.1f%%' % \
(broker_maximum_byte_in_bps, producer_quota_bps, self.maximum_broker_deviation_percentage)
# validate that maximum_consumer_throughput <= consumer_quota * (1 + maximum_client_deviation_percentage/100)
consumer_attribute_name = 'kafka.consumer:type=consumer-fetch-manager-metrics,client-id=%s:bytes-consumed-rate' % consumer.client_id
consumer_maximum_bps = consumer.maximum_jmx_value[consumer_attribute_name]
consumer_quota_bps = self.quota_config.consumer_quota
self.logger.info('consumer has maximum throughput %.2f bps with consumer quota %.2f bps' % (consumer_maximum_bps, consumer_quota_bps))
if consumer_maximum_bps > consumer_quota_bps*(self.maximum_client_deviation_percentage/100+1):
success = False
msg += 'maximum consumer throughput %.2f bps exceeded consumer quota %.2f bps by more than %.1f%%' % \
(consumer_maximum_bps, consumer_quota_bps, self.maximum_client_deviation_percentage)
# validate that maximum_broker_byte_out_rate <= consumer_quota * (1 + maximum_broker_deviation_percentage/100)
broker_byte_out_attribute_name = 'kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec:OneMinuteRate'
broker_maximum_byte_out_bps = broker.maximum_jmx_value[broker_byte_out_attribute_name]
self.logger.info('broker has maximum byte-out rate %.2f bps with consumer quota %.2f bps' %
(broker_maximum_byte_out_bps, consumer_quota_bps))
if broker_maximum_byte_out_bps > consumer_quota_bps*(self.maximum_broker_deviation_percentage/100+1):
success = False
msg += 'maximum broker byte-out rate %.2f bps exceeded consumer quota %.2f bps by more than %.1f%%' % \
(broker_maximum_byte_out_bps, consumer_quota_bps, self.maximum_broker_deviation_percentage)
return success, msg
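# Worked example of the bound applied above (illustrative numbers): with a default
# producer quota of 2500000 B/s and maximum_client_deviation_percentage = 100.0,
# the observed peak producer rate may be at most
# 2500000 * (1 + 100.0 / 100) = 5000000 B/s before validate() reports a failure.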
|
anandology/pyjamas
|
refs/heads/master
|
examples/libtest/LoopTest.py
|
12
|
from UnitTest import UnitTest
import time
from write import write, writebr
class A(object):
def __init__(self, x):
self.x = x
def getX(self):
return self.x
def fib(n):
if n<3.0:
return 1.0
return fib(n-2.0)+fib(n-1.0)
def int_fib(n):
if n<3:
return 1
return int_fib(n-2)+int_fib(n-1)
def long_fib(n):
if n<3L:
return 1L
return long_fib(n-2L)+long_fib(n-1L)
class LoopTest(UnitTest):
def testLoop1(self):
t1 = t0 = time.time()
n = 1000
a = A(1)
m = 0;
while t1 - t0 == 0:
m += 1
for i in range(n):
x = a.getX()
t1 = time.time()
dt = t1 - t0
writebr("Loop1: %.2f/sec" % (n*m/dt))
def testLoop2(self):
t1 = t0 = time.time()
n = 100
m = 0.0
while t1 - t0 == 0:
m += 1.0
for i in range(n):
fib(10.0)
t1 = time.time()
dt = t1 - t0
writebr("Loop2 (float): %.2f/sec" % (n*m/dt))
def testLoop3(self):
t1 = t0 = time.time()
n = 100
m = 0.0
while t1 - t0 == 0:
m += 1.0
for i in range(n):
int_fib(10)
t1 = time.time()
dt = t1 - t0
writebr("Loop3 (int): %.2f/sec" % (n*m/dt))
def testLoop4(self):
t1 = t0 = time.time()
n = 100
m = 0.0
while t1 - t0 == 0:
m += 1.0
for i in range(n):
long_fib(10L)
t1 = time.time()
dt = t1 - t0
writebr("Loop4 (long): %.2f/sec" % (n*m/dt))
'''
def testIterList(self):
lst = []
for i in xrange(1000):
lst.append(i)
t1 = t0 = time.time()
n = 100
m = 0.0
while t1 - t0 == 0:
m += 1.0
for x in xrange(20):
for i in lst:
pass
t1 = time.time()
dt = t1 - t0
writebr("IterList: %.2f/sec" % (n*m/dt))
def testEnumerateList(self):
lst = []
for i in xrange(1000):
lst.append(i)
t1 = t0 = time.time()
n = 100
m = 0.0
while t1 - t0 == 0:
m += 1.0
for x in xrange(2):
for i, j in enumerate(lst):
pass
t1 = time.time()
dt = t1 - t0
writebr("EnumerateList: %.2f/sec" % (n*m/dt))
'''
if __name__ == '__main__':
l = LoopTest()
l.run()
|
vnsofthe/odoo-dev
|
refs/heads/master
|
addons/quality_control/models/qc_test_category.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
class QcTestTemplateCategory(models.Model):
_name = 'qc.test.category'
_description = 'Test category'
@api.one
@api.depends('name', 'parent_id')
def _get_complete_name(self):
if self.name:
names = [self.name]
parent = self.parent_id
while parent:
names.append(parent.name)
parent = parent.parent_id
self.complete_name = " / ".join(reversed(names))
else:
self.complete_name = ""
@api.constrains('parent_id')
def _check_recursion(self):
ids = self.ids
level = 100
while ids:
parents = self.search([('id', 'in', ids),
('parent_id', '!=', False)])
ids = list(set([x.parent_id.id for x in parents]))
if not level:
raise exceptions.Warning(
_('Error! You cannot create recursive categories.'))
level -= 1
name = fields.Char('Name', required=True, translate=True)
parent_id = fields.Many2one(
comodel_name='qc.test.category', string='Parent category', select=True)
complete_name = fields.Char(
compute="_get_complete_name", string='Complete Name')
child_ids = fields.One2many(
comodel_name='qc.test.category', inverse_name='parent_id',
string='Child categories')
active = fields.Boolean(
string='Active', default=True,
help="This field allows you to hide the category without removing it.")
|
sriprasanna/django-1.3.1
|
refs/heads/master
|
django/contrib/gis/gdal/tests/test_geom.py
|
154
|
from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, \
OGRException, OGRIndexError, SpatialReference, CoordTransform, \
GDAL_VERSION
from django.utils import unittest
from django.contrib.gis.geometry.test_data import TestDataMixin
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
try:
g = OGRGeomType(1)
g = OGRGeomType(7)
g = OGRGeomType('point')
g = OGRGeomType('GeometrycollectioN')
g = OGRGeomType('LINearrING')
g = OGRGeomType('Unknown')
except:
self.fail('Could not create an OGRGeomType object!')
# Should raise OGRException on these invalid inputs.
self.assertRaises(OGRException, OGRGeomType, 23)
self.assertRaises(OGRException, OGRGeomType, 'fooD')
self.assertRaises(OGRException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1))
self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection')
self.assertEqual(True, OGRGeomType('point') == 'POINT')
self.assertEqual(False, OGRGeomType('point') == 2)
self.assertEqual(True, OGRGeomType('unknown') == 0)
self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON')
self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point'))
self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertEqual(None, OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertTrue(OGRGeomType(wkb25bit + 1) == 'Point25D')
self.assertTrue(OGRGeomType('MultiLineString25D') == (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
# Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex, geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
from django.contrib.gis.gdal.prototypes.geom import GEOJSON
if not GEOJSON: return
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
def test02_points(self):
"Testing Point objects."
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(True, linestr == OGRGeometry(ls.wkt))
self.assertEqual(True, linestr != prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt))
self.assertEqual(True, mlinestr != prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
#self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr == OGRGeometry(rr.wkt))
self.assertEqual(True, lr != prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180,-90,180,90)
p = OGRGeometry.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(True, poly == OGRGeometry(p.wkt))
self.assertEqual(True, poly != prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
print "\nBEGIN - expecting IllegalArgumentException; safe to ignore.\n"
try:
c = poly.centroid
except OGRException:
# Should raise an OGR exception, rings are not closed
pass
else:
self.fail('Should have raised an OGRException!')
print "\nEND - expecting IllegalArgumentException; safe to ignore.\n"
# Closing the rings -- doesn't work on GDAL versions 1.4.1 and below:
# http://trac.osgeo.org/gdal/ticket/1673
if GDAL_VERSION <= (1, 4, 1): return
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test08_multipolygons(self):
"Testing MultiPolygon objects."
prev = OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Iterating through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# or transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(OGRException, mp.add, pnt)
# GeometryCollection.add may take an OGRGeometry (if it is another collection
# of the same type, all child geometries will be added individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
import cPickle
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = cPickle.loads(cPickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertNotEqual(None, OGRGeometry('POINT(0 0)'))
self.assertEqual(False, OGRGeometry('LINESTRING(0 0, 1 1)') == 3)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(OGRGeomTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
CAAD-RWTH/ClockworkForDynamo
|
refs/heads/master
|
nodes/2.x/python/MassFloor.Mass.py
|
4
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def GetMass(item):
if hasattr(item, "OwningMassId"): return item.Document.GetElement(item.OwningMassId).ToDSType(True)
else: return None
items = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetMass(x) for x in items]
else: OUT = GetMass(items)
|
Ichag/openerp-server
|
refs/heads/master
|
openerp/addons/base/module/report/__init__.py
|
463
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_module_reference_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jualjiman/knowledge-base
|
refs/heads/master
|
src/knowledge_base/users/apps.py
|
1
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.db.models.signals import post_save
from knowledge_base.utils.signals import generate_thumbnail
class UsersAppConfig(AppConfig):
"""
AppConfig for the ```knowledge_base.users``` module.
"""
name = 'knowledge_base.users'
def ready(self):
super(UsersAppConfig, self).ready()
model = self.get_model('User')
post_save.connect(
generate_thumbnail,
sender=model
)
|
bjlittle/iris
|
refs/heads/pre-commit-ci-update-config
|
lib/iris/tests/system_test.py
|
1
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
This system test module is useful to identify if some of the key components required for Iris are available.
The system tests can be run with ``python setup.py test --system-tests``.
"""
# import iris tests first so that some things can be initialised before importing anything else
import cf_units
import numpy as np
import iris
import iris.tests as tests
class SystemInitialTest(tests.IrisTest):
def system_test_supported_filetypes(self):
nx, ny = 60, 60
data = np.arange(nx * ny, dtype=">f4").reshape(nx, ny)
laty = np.linspace(0, 59, ny).astype("f8")
lonx = np.linspace(30, 89, nx).astype("f8")
def horiz_cs():
return iris.coord_systems.GeogCS(6371229)
cm = iris.cube.Cube(data, "wind_speed", units="m s-1")
cm.add_dim_coord(
iris.coords.DimCoord(
laty, "latitude", units="degrees", coord_system=horiz_cs()
),
0,
)
cm.add_dim_coord(
iris.coords.DimCoord(
lonx, "longitude", units="degrees", coord_system=horiz_cs()
),
1,
)
cm.add_aux_coord(
iris.coords.AuxCoord(
np.array([9], "i8"), "forecast_period", units="hours"
)
)
hours_since_epoch = cf_units.Unit(
"hours since epoch", cf_units.CALENDAR_GREGORIAN
)
cm.add_aux_coord(
iris.coords.AuxCoord(
np.array([3], "i8"), "time", units=hours_since_epoch
)
)
cm.add_aux_coord(
iris.coords.AuxCoord(
np.array([99], "i8"), long_name="pressure", units="Pa"
)
)
filetypes = (".nc", ".pp")
if tests.GRIB_AVAILABLE:
filetypes += (".grib2",)
for filetype in filetypes:
saved_tmpfile = iris.util.create_temp_filename(suffix=filetype)
iris.save(cm, saved_tmpfile)
new_cube = iris.load_cube(saved_tmpfile)
self.assertCML(
new_cube, ("system", "supported_filetype_%s.cml" % filetype)
)
@tests.skip_grib
def system_test_grib_patch(self):
import gribapi
gm = gribapi.grib_new_from_samples("GRIB2")
_ = gribapi.grib_get_double(gm, "missingValue")
new_missing_value = 123456.0
gribapi.grib_set_double(gm, "missingValue", new_missing_value)
new_result = gribapi.grib_get_double(gm, "missingValue")
self.assertEqual(new_result, new_missing_value)
def system_test_imports_general(self):
if tests.MPL_AVAILABLE:
import matplotlib # noqa
import netCDF4 # noqa
if __name__ == "__main__":
tests.main()
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/loot/collectible/collectible_parts/shared_light_table_glasstop_01.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/collectible/collectible_parts/shared_light_table_glasstop_01.iff"
result.attribute_template_id = -1
result.stfName("collectible_loot_items_n","light_table_glasstop_01")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
dsajkl/123
|
refs/heads/master
|
lms/djangoapps/bulk_email/migrations/0007_load_course_email_template.py
|
182
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
class Migration(DataMigration):
def forwards(self, orm):
"Load data from fixture."
from django.core.management import call_command
call_command("loaddata", "course_email_template.json")
def backwards(self, orm):
"Perform a no-op to go backwards."
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.courseemailtemplate': {
'Meta': {'object_name': 'CourseEmailTemplate'},
'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email']
symmetrical = True
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/boto-2.46.1/tests/integration/cognito/identity/test_cognito_identity.py
|
112
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.cognito.identity.exceptions import ResourceNotFoundException
from tests.integration.cognito import CognitoTest
class TestCognitoIdentity(CognitoTest):
"""
    Test Cognito identity pool operations, since individual Cognito identities
require an AWS account ID.
"""
def test_cognito_identity(self):
# Ensure the identity pool is in the list of pools.
response = self.cognito_identity.list_identity_pools(max_results=5)
expected_identity = {'IdentityPoolId': self.identity_pool_id,
'IdentityPoolName': self.identity_pool_name}
self.assertIn(expected_identity, response['IdentityPools'])
# Ensure the pool's attributes are as expected.
response = self.cognito_identity.describe_identity_pool(
identity_pool_id=self.identity_pool_id
)
self.assertEqual(response['IdentityPoolName'], self.identity_pool_name)
self.assertEqual(response['IdentityPoolId'], self.identity_pool_id)
self.assertFalse(response['AllowUnauthenticatedIdentities'])
def test_resource_not_found_exception(self):
with self.assertRaises(ResourceNotFoundException):
# Note the region is us-east-0 which is an invalid region name.
self.cognito_identity.describe_identity_pool(
identity_pool_id='us-east-0:c09e640-b014-4822-86b9-ec77c40d8d6f'
)
|
jinankjain/zamboni
|
refs/heads/master
|
mkt/stats/tests/test_views.py
|
1
|
import json
import mock
import requests
from nose.tools import eq_, ok_
from rest_framework.reverse import reverse
from django.conf import settings
import amo
from stats.models import Contribution
from mkt.api.tests.test_oauth import RestOAuth
from mkt.site.fixtures import fixture
from mkt.stats.views import APP_STATS, STATS, _get_monolith_data
class StatsAPITestMixin(object):
def setUp(self):
super(StatsAPITestMixin, self).setUp()
patches = [
mock.patch('monolith.client.Client'),
mock.patch.object(settings, 'MONOLITH_SERVER', 'http://0.0.0.0:0'),
]
for patch in patches:
patch.start()
self.addCleanup(patch.stop)
def test_cors(self):
res = self.client.get(self.url(), data=self.data)
self.assertCORS(res, 'get')
def test_verbs(self):
self._allowed_verbs(self.url(), ['get'])
@mock.patch('monolith.client.Client')
def test_monolith_down(self, mocked):
mocked.side_effect = requests.ConnectionError
res = self.client.get(self.url(), data=self.data)
eq_(res.status_code, 503)
def test_anon(self):
res = self.anon.get(self.url())
eq_(res.status_code, 403)
class TestGlobalStatsResource(StatsAPITestMixin, RestOAuth):
def setUp(self):
super(TestGlobalStatsResource, self).setUp()
self.grant_permission(self.profile, 'Stats:View')
self.data = {'start': '2013-04-01',
'end': '2013-04-15',
'interval': 'day'}
def url(self, metric=None):
metric = metric or STATS.keys()[0]
return reverse('global_stats', kwargs={'metric': metric})
def test_bad_metric(self):
res = self.client.get(self.url('foo'))
eq_(res.status_code, 404)
def test_missing_args(self):
res = self.client.get(self.url())
eq_(res.status_code, 400)
data = json.loads(res.content)
for f in ('start', 'end', 'interval'):
eq_(data['detail'][f], ['This field is required.'])
def test_good(self):
res = self.client.get(self.url(), data=self.data)
eq_(res.status_code, 200)
eq_(json.loads(res.content)['objects'], [])
@mock.patch('monolith.client.Client')
def test_dimensions(self, mocked):
client = mock.MagicMock()
mocked.return_value = client
data = self.data.copy()
data.update({'region': 'br', 'package_type': 'hosted'})
res = self.client.get(self.url('apps_added_by_package'), data=data)
eq_(res.status_code, 200)
ok_(client.called)
eq_(client.call_args[1], {'region': 'br', 'package_type': 'hosted'})
@mock.patch('monolith.client.Client')
def test_dimensions_default(self, mocked):
client = mock.MagicMock()
mocked.return_value = client
res = self.client.get(self.url('apps_added_by_package'),
data=self.data)
eq_(res.status_code, 200)
ok_(client.called)
eq_(client.call_args[1], {'region': 'us', 'package_type': 'hosted'})
@mock.patch('monolith.client.Client')
def test_dimensions_default_is_none(self, mocked):
client = mock.MagicMock()
mocked.return_value = client
res = self.client.get(self.url('apps_installed'), data=self.data)
eq_(res.status_code, 200)
ok_(client.called)
eq_(client.call_args[1], {})
data = self.data.copy()
data['region'] = 'us'
res = self.client.get(self.url('apps_installed'), data=data)
eq_(res.status_code, 200)
ok_(client.called)
eq_(client.call_args[1], {'region': 'us'})
@mock.patch('monolith.client.Client')
    def test_coercion(self, mocked):
client = mock.MagicMock()
client.return_value = [{'count': 1.99, 'date': '2013-10-10'}]
mocked.return_value = client
data = _get_monolith_data(
{'metric': 'foo', 'coerce': {'count': str}}, '2013-10-10',
'2013-10-10', 'day', {})
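        # The 'coerce' mapping is applied per returned data point; here the
        # mocked float count 1.99 comes back coerced to the string '1.99'.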
eq_(type(data['objects'][0]['count']), str)
class TestAppStatsResource(StatsAPITestMixin, RestOAuth):
fixtures = fixture('user_2519')
def setUp(self):
super(TestAppStatsResource, self).setUp()
self.app = amo.tests.app_factory(status=amo.STATUS_PUBLIC)
self.app.addonuser_set.create(user=self.user)
self.data = {'start': '2013-04-01', 'end': '2013-04-15',
'interval': 'day'}
def url(self, pk=None, metric=None):
pk = pk or self.app.pk
metric = metric or APP_STATS.keys()[0]
return reverse('app_stats', kwargs={'pk': pk, 'metric': metric})
def test_owner(self):
res = self.client.get(self.url(), data=self.data)
eq_(res.status_code, 200)
def test_perms(self):
self.app.addonuser_set.all().delete()
self.grant_permission(self.profile, 'Stats:View')
res = self.client.get(self.url(), data=self.data)
eq_(res.status_code, 200)
def test_bad_app(self):
res = self.client.get(self.url(pk=99999999))
eq_(res.status_code, 404)
def test_bad_metric(self):
res = self.client.get(self.url(metric='foo'))
eq_(res.status_code, 404)
def test_missing_args(self):
res = self.client.get(self.url())
eq_(res.status_code, 400)
data = json.loads(res.content)
for f in ('start', 'end', 'interval'):
eq_(data['detail'][f], ['This field is required.'])
class TestGlobalStatsTotalResource(StatsAPITestMixin, RestOAuth):
fixtures = fixture('user_2519')
def setUp(self):
super(TestGlobalStatsTotalResource, self).setUp()
self.grant_permission(self.profile, 'Stats:View')
self.data = None # For the mixin tests.
def url(self):
return reverse('global_stats_total')
def test_perms(self):
res = self.client.get(self.url())
eq_(res.status_code, 200)
class TestAppStatsTotalResource(StatsAPITestMixin, RestOAuth):
fixtures = fixture('user_2519')
def setUp(self):
super(TestAppStatsTotalResource, self).setUp()
self.app = amo.tests.app_factory(status=amo.STATUS_PUBLIC)
self.app.addonuser_set.create(user=self.user)
self.data = None # For the mixin tests.
def url(self, pk=None, metric=None):
pk = pk or self.app.pk
return reverse('app_stats_total', kwargs={'pk': pk})
def test_owner(self):
res = self.client.get(self.url())
eq_(res.status_code, 200)
def test_perms(self):
self.app.addonuser_set.all().delete()
self.grant_permission(self.profile, 'Stats:View')
res = self.client.get(self.url())
eq_(res.status_code, 200)
def test_bad_app(self):
res = self.client.get(self.url(pk=99999999))
eq_(res.status_code, 404)
class TestTransactionResource(RestOAuth):
fixtures = fixture('prices', 'user_2519', 'webapp_337141')
def setUp(self):
super(TestTransactionResource, self).setUp()
Contribution.objects.create(
addon_id=337141,
amount='1.89',
currency='EUR',
price_tier_id=2,
uuid='abcdef123456',
transaction_id='abc-def',
type=1,
user=self.user
)
def url(self, t_id=None):
t_id = t_id or 'abc-def'
return reverse('transaction_api', kwargs={'transaction_id': t_id})
def test_cors(self):
res = self.client.get(self.url())
self.assertCORS(res, 'get')
def test_verbs(self):
self.grant_permission(self.profile, 'RevenueStats:View')
self._allowed_verbs(self.url(), ['get'])
def test_anon(self):
res = self.anon.get(self.url())
eq_(res.status_code, 403)
def test_bad_txn(self):
self.grant_permission(self.profile, 'RevenueStats:View')
res = self.client.get(self.url('foo'))
eq_(res.status_code, 404)
def test_good_but_no_permission(self):
res = self.client.get(self.url())
eq_(res.status_code, 403)
def test_good(self):
self.grant_permission(self.profile, 'RevenueStats:View')
res = self.client.get(self.url())
eq_(res.status_code, 200)
obj = json.loads(res.content)
eq_(obj['id'], 'abc-def')
eq_(obj['app_id'], 337141)
eq_(obj['amount_USD'], '1.99')
eq_(obj['type'], 'Purchase')
|
spookylukey/django-debug-toolbar
|
refs/heads/master
|
tests/panels/test_redirects.py
|
7
|
from __future__ import absolute_import, unicode_literals
import django
from django.conf import settings
from django.http import HttpResponse
from django.test.utils import override_settings
from ..base import BaseTestCase
from debug_toolbar.compat import unittest
@override_settings(DEBUG_TOOLBAR_CONFIG={'INTERCEPT_REDIRECTS': True})
class RedirectsPanelTestCase(BaseTestCase):
def setUp(self):
super(RedirectsPanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('RedirectsPanel')
def test_regular_response(self):
response = self.panel.process_response(self.request, self.response)
self.assertTrue(response is self.response)
def test_not_a_redirect(self):
redirect = HttpResponse(status=304) # not modified
response = self.panel.process_response(self.request, redirect)
self.assertTrue(response is redirect)
def test_redirect(self):
redirect = HttpResponse(status=302)
redirect['Location'] = 'http://somewhere/else/'
response = self.panel.process_response(self.request, redirect)
self.assertFalse(response is redirect)
self.assertContains(response, '302 FOUND')
self.assertContains(response, 'http://somewhere/else/')
def test_redirect_with_broken_context_processor(self):
context_processors = settings.TEMPLATE_CONTEXT_PROCESSORS + (
'tests.context_processors.broken',
)
with self.settings(TEMPLATE_CONTEXT_PROCESSORS=context_processors):
redirect = HttpResponse(status=302)
redirect['Location'] = 'http://somewhere/else/'
response = self.panel.process_response(self.request, redirect)
self.assertFalse(response is redirect)
self.assertContains(response, '302 FOUND')
self.assertContains(response, 'http://somewhere/else/')
def test_unknown_status_code(self):
redirect = HttpResponse(status=369)
redirect['Location'] = 'http://somewhere/else/'
response = self.panel.process_response(self.request, redirect)
self.assertContains(response, '369 UNKNOWN STATUS CODE')
@unittest.skipIf(django.VERSION[:2] < (1, 6), "reason isn't supported")
def test_unknown_status_code_with_reason(self):
redirect = HttpResponse(status=369, reason='Look Ma!')
redirect['Location'] = 'http://somewhere/else/'
response = self.panel.process_response(self.request, redirect)
self.assertContains(response, '369 Look Ma!')
def test_insert_content(self):
"""
        Test that the panel only inserts its content during generate_stats,
        not during process_response.
"""
redirect = HttpResponse(status=304)
response = self.panel.process_response(self.request, redirect)
self.assertIsNotNone(response)
response = self.panel.generate_stats(self.request, redirect)
self.assertIsNone(response)
|
zhouzhenghui/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/lib2to3/fixes/fix_reduce.py
|
203
|
# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for reduce().
Makes sure reduce() is imported from the functools module if reduce is
used in that module.
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import touch_import
class FixReduce(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'reduce'
trailer< '('
arglist< (
(not(argument<any '=' any>) any ','
not(argument<any '=' any>) any) |
(not(argument<any '=' any>) any ','
not(argument<any '=' any>) any ','
not(argument<any '=' any>) any)
) >
')' >
>
"""
def transform(self, node, results):
touch_import('functools', 'reduce', node)
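# Illustrative effect of this fixer: given source containing a bare call such
# as "total = reduce(max, seq)", the touch_import call above adds
# "from functools import reduce" near the top of the module being fixed; the
# reduce() call site itself is left unchanged.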
|
lstern/SWProxy-plugins
|
refs/heads/master
|
SWParser/parser.py
|
4
|
#!/usr/bin/env python
import csv
import json
import cStringIO
import sys
import struct
import numbers
import os
import codecs
from SWPlugin import SWPlugin
from collections import OrderedDict
from smon_decryptor import decrypt_request, decrypt_response
from monsters import monsters_name_map as name_map
# ref: http://stackoverflow.com/a/5838817/1020222
class DictUnicodeWriter(object):
def __init__(self, f, fieldnames, dialect=csv.excel, encoding="utf-8", newfile=True, **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.DictWriter(self.queue, fieldnames, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
if newfile:
self.writebom()
def writerow(self, D):
self.writer.writerow({k:unicode(v).encode("utf-8") for k, v in D.items()})
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for D in rows:
self.writerow(D)
def writeheader(self):
self.writer.writeheader()
def writebom(self):
"""Write BOM, so excel can identify this as UTF8"""
self.stream.write(u'\ufeff'.encode('utf8'))
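# Illustrative usage of DictUnicodeWriter (out_file and the field names below
# are placeholders, not part of this module):
#
#     writer = DictUnicodeWriter(out_file, fieldnames=['name', 'level'])
#     writer.writeheader()
#     writer.writerow({'name': u'Fairy (Water)', 'level': 25})
#
# Each row is round-tripped through an in-memory queue and encoded to UTF-8
# (after the BOM written in __init__) before reaching the target stream.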
def monster_name(uid, default_unknown="???", full=True):
uid = str(uid).ljust(5, "0")
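    # Judging by the slicing below, the padded uid packs three values:
    # uid[:-2] is the monster family used for the name lookup, uid[-2] is the
    # awakened flag, and uid[-1] is the attribute (e.g. 1 = Water).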
if default_unknown == "???":
default_unknown += "[{uid}]".format(uid=int(uid[:-2]))
if uid in name_map and len(name_map[uid]) > 0:
return name_map[uid]
awakened = True if int(uid[-2]) else False
if uid[:-2] in name_map and len(name_map[uid[:-2]]) > 0:
name = name_map[uid[:-2]]
else:
name = default_unknown
if full:
attribute = int(uid[-1])
return "%s%s (%s)" % ("AWAKENED " if awakened else "", name, monster_attribute(attribute))
elif not awakened:
return name
return default_unknown
def monster_attribute(attribute):
name_map = {
1: "Water",
2: "Fire",
3: "Wind",
4: "Light",
5: "Dark"
}
if attribute in name_map:
return name_map[attribute]
else:
return "???[{attr}]".format(attr=attribute)
def rune_effect_type(id, mode=0):
"""mode 0 = rune optimizer, mode 1 = csv export"""
if mode != 0 and mode != 1:
raise ValueError('Should be 0 (optimizer) or 1 (csv)')
effect_type_map = {
0: ("",""),
1: ("HP flat", "HP +%s"),
2: ("HP%", "HP %s%%"),
3: ("ATK flat", "ATK +%s"),
4: ("ATK%", "ATK %s%%"),
5: ("DEF flat", "DEF +%s"),
6: ("DEF%", "DEF %s%%"),
# 7: "UNKNOWN", # ?
8: ("SPD", "SPD +%s"),
9: ("CRate", "CRI Rate %s%%"),
10: ("CDmg", "CRI Dmg %s%%"),
11: ("RES", "Resistance %s%%"),
12: ("ACC", "Accuracy %s%%")
}
return effect_type_map[id][mode] if id in effect_type_map else "UNKNOWN"
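# For instance (purely illustrative): rune_effect_type(4) returns the
# optimizer label "ATK%", while rune_effect_type(4, 1) returns the CSV
# template "ATK %s%%", which rune_effect() below fills with the rolled value.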
def rune_effect(eff):
typ = eff[0]
value = eff[1]
flats = [1,3,5,8]
if len(eff) > 3:
if eff[3] != 0:
if typ in flats:
value = "%s -> +%s" % (value, str(int(value) + int(eff[3])))
else:
value = "%s%% -> %s" % (value, str(int(value) + int(eff[3])))
if typ == 0:
ret = ""
elif typ == 7 or typ > 12:
ret = "UNK %s %s" % (typ, value)
else:
ret = rune_effect_type(typ,1) % value
if len(eff) > 2:
if eff[2] != 0:
ret = "%s (Converted)" % ret
return ret
def rune_set_id(id):
name_map = {
1: "Energy",
2: "Guard",
3: "Swift",
4: "Blade",
5: "Rage",
6: "Focus",
7: "Endure",
8: "Fatal",
10: "Despair",
11: "Vampire",
13: "Violent",
14: "Nemesis",
15: "Will",
16: "Shield",
17: "Revenge",
18: "Destroy",
19: "Fight",
20: "Determination",
21: "Enhance",
22: "Accuracy",
23: "Tolerance",
}
if id in name_map:
return name_map[id]
else:
return "???"
def map_craft(craft, craft_id):
type_str = str(craft['craft_type_id'])
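    # craft_type_id appears to be a packed decimal, based on the slices below:
    # the leading digits select the rune set, digits [-4:-2] the main stat,
    # and the final digit the grade.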
return {
'id': craft_id,
'item_id': craft['craft_item_id'],
'type': 'E' if craft['craft_type'] == 1 else 'G',
'set': rune_set_id(int(type_str[:-4])),
'stat': rune_effect_type(int(type_str[-4:-2])),
'grade': int(type_str[-1:])
}
def map_rune(rune, rune_id, monster_id=0, monster_uid=0):
    csv_map = {
'slot': rune['slot_no'],
'rune_set': rune_set_id(rune['set_id']),
'rune_grade': rune['class'],
'rune_level': rune['upgrade_curr'],
'pri_eff': rune_effect(rune['pri_eff']),
'pre_eff': rune_effect(rune['prefix_eff'])
}
    if rune_id is not None:
        csv_map.update({
'sell_price': rune['sell_value'],
'rune_id': rune_id,
'monster_id': '%s (%s)' % (monster_id, monster_name(monster_uid)) if monster_id != 0 else '0',
})
for i in range(0, len(rune['sec_eff'])):
        csv_map['sub' + str(i + 1)] = rune_effect(rune['sec_eff'][i])
subs = {
'ATK flat': '-',
'ATK%': '-',
'HP flat': '-',
'HP%': '-',
'DEF flat': '-',
'DEF%': '-',
'RES': '-',
'ACC': '-',
'SPD': '-',
'CDmg': '-',
'CRate': '-',
}
for sec_eff in rune['sec_eff']:
subs[rune_effect_type(sec_eff[0])] = sec_eff[1] + (sec_eff[3] if len(sec_eff) > 2 else 0)
optimizer_map = {"id": rune_id,
"unique_id": rune['rune_id'],
"monster": monster_id,
"monster_n":monster_name(monster_uid, "Unknown name"),
"set": rune_set_id(rune['set_id']),
"slot": rune['slot_no'],
"grade": rune['class'],
"level": rune['upgrade_curr'],
"m_t": rune_effect_type(rune['pri_eff'][0]),
"m_v": rune['pri_eff'][1],
"i_t": rune_effect_type(rune['prefix_eff'][0]),
"i_v": rune['prefix_eff'][1],
"locked":0,
"sub_res": subs['RES'],
"sub_cdmg": subs['CDmg'],
"sub_atkf": subs['ATK flat'],
"sub_acc": subs['ACC'],
"sub_atkp": subs['ATK%'],
"sub_defp": subs['DEF%'],
"sub_deff": subs['DEF flat'],
"sub_hpp": subs['HP%'],
"sub_hpf": subs['HP flat'],
"sub_spd": subs['SPD'],
"sub_crate": subs['CRate']}
for sub in range(0,4):
optimizer_map['s%s_t' % (sub + 1)] = rune_effect_type(rune['sec_eff'][sub][0]) if len(rune['sec_eff']) >= sub + 1 else ""
optimizer_map['s%s_v' % (sub + 1)] = rune['sec_eff'][sub][1] +\
(rune['sec_eff'][sub][3] if len(rune['sec_eff'][sub]) > 2 else 0) \
if len(rune['sec_eff']) >= sub + 1 else 0
optimizer_map['s%s_data' % (sub + 1)] = {"enchanted": rune['sec_eff'][sub][2] == 1,
"gvalue": rune['sec_eff'][sub][3]} \
if len(rune['sec_eff']) >= sub + 1 and len(rune['sec_eff'][sub]) > 2 else {}
    return optimizer_map, csv_map
def map_monster(monster, monster_id_mapping, storage_id, wizard_name=None):
csv_map = {
'name': monster_name(monster['unit_master_id']),
'level': monster['unit_level'],
'grade': monster['class'],
'attribute': monster_attribute(monster['attribute']),
'in_storage': "Yes" if monster['building_id'] == storage_id else "No",
'hp': int(monster['con']) * 15,
'atk': monster['atk'],
'def': monster['def'],
'spd': monster['spd'],
'crate': monster['critical_rate'],
'cdmg': monster['critical_damage'],
'res': monster['resist'],
'acc': monster['accuracy']
}
if wizard_name is None:
csv_map['id'] = monster_id_mapping[monster['unit_id']]
else:
        csv_map.update({'wizard_name': wizard_name})
if monster_id_mapping:
optimizer_monster = {"id": monster_id_mapping[monster['unit_id']],
"name":"%s%s" % (monster_name(monster['unit_master_id'], "Unknown name"),
" (In Storage)" if monster['building_id'] == storage_id else ""),
"level": monster['unit_level'],
"unit_id": monster['unit_id'],
"master_id": monster['unit_master_id'],
"stars": monster['class'],
"attribute": monster_attribute(monster['attribute']),
"b_hp": int(monster['con']) * 15,
"b_atk": monster['atk'],
"b_def": monster['def'],
"b_spd": monster['spd'],
"b_crate": monster['critical_rate'],
"b_cdmg": monster['critical_damage'],
"b_res": monster['resist'],
"b_acc": monster['accuracy']}
else:
optimizer_monster = None
return optimizer_monster, csv_map
|
staar/empty_project
|
refs/heads/master
|
build/tem/generate_bin.py
|
1
|
import sys
sys.path.append("./python_files/")
import configure
configuration = configure.configure()
print configuration
configuration.read()
print configuration
configuration.write_all()
|
shenlong3030/asv-django-guestbook
|
refs/heads/master
|
django/middleware/gzip.py
|
13
|
import re
from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth compressing non-OK or really short responses.
if response.status_code != 200 or len(response.content) < 200:
return response
patch_vary_headers(response, ('Accept-Encoding',))
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
        # MSIE has issues with gzipped responses of various content types.
if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
ctype = response.get('Content-Type', '').lower()
if not ctype.startswith("text/") or "javascript" in ctype:
return response
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
response.content = compress_string(response.content)
response['Content-Encoding'] = 'gzip'
response['Content-Length'] = str(len(response.content))
return response
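# A minimal sketch of enabling this middleware (the setting name matches this
# Django generation; the exact position in the stack is illustrative):
#
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.gzip.GZipMiddleware',
#         # ... remaining middleware ...
#     )
#
# Placing it near the top means it runs late in the response phase, after
# other middleware have finished modifying the response body.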
|
seem-sky/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/distutils/tests/test_install_headers.py
|
147
|
"""Tests for distutils.command.install_headers."""
import sys
import os
import unittest
import getpass
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_simple_run(self):
# we have two headers
header_list = self.mkdtemp()
header1 = os.path.join(header_list, 'header1')
header2 = os.path.join(header_list, 'header2')
self.write_file(header1)
self.write_file(header2)
headers = [header1, header2]
pkg_dir, dist = self.create_dist(headers=headers)
cmd = install_headers(dist)
self.assertEqual(cmd.get_inputs(), headers)
# let's run the command
cmd.install_dir = os.path.join(pkg_dir, 'inst')
cmd.ensure_finalized()
cmd.run()
# let's check the results
self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
return unittest.makeSuite(InstallHeadersTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
statsmodels/statsmodels.github.io
|
refs/heads/master
|
v0.10.2/plots/graphics_regression_influence.py
|
5
|
# -*- coding: utf-8 -*-
'''
Using a model built from the state crime dataset, plot the influence in
regression. Observations with high leverage or large residuals will be
labeled in the plot to show potential influence points.
'''
import statsmodels.api as sm
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
crime_data = sm.datasets.statecrime.load_pandas()
results = smf.ols('murder ~ hs_grad + urban + poverty + single',
data=crime_data.data).fit()
sm.graphics.influence_plot(results)
plt.show()
|
cgar/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/lint/tests/test_lint.py
|
59
|
from __future__ import unicode_literals
import os
import mock
import pytest
import six
from .. import lint as lint_mod
from ..lint import filter_whitelist_errors, parse_whitelist, lint
_dummy_repo = os.path.join(os.path.dirname(__file__), "dummy")
def _mock_lint(name):
wrapped = getattr(lint_mod, name)
return mock.patch(lint_mod.__name__ + "." + name, wraps=wrapped)
def test_filter_whitelist_errors():
whitelist = {
'svg/*': {
'CONSOLE': {12},
'INDENT TABS': {None}
}
}
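    # This mirrors the shape parse_whitelist produces: a path pattern mapped
    # to rule names, each with a set of line numbers (None means the rule is
    # ignored on every line of matching files).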
    # parse_whitelist normalises the case/path of the match string, so we need to do the same
whitelist = {os.path.normcase(p): e for p, e in whitelist.items()}
# paths passed into filter_whitelist_errors are always Unix style
filteredfile = 'svg/test.html'
unfilteredfile = 'html/test.html'
# Tests for passing no errors
filtered = filter_whitelist_errors(whitelist, filteredfile, [])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, unfilteredfile, [])
assert filtered == []
# Tests for filtering on file and line number
filtered = filter_whitelist_errors(whitelist, filteredfile, [['CONSOLE', '', filteredfile, 12]])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, unfilteredfile, [['CONSOLE', '', unfilteredfile, 12]])
assert filtered == [['CONSOLE', '', unfilteredfile, 12]]
filtered = filter_whitelist_errors(whitelist, filteredfile, [['CONSOLE', '', filteredfile, 11]])
assert filtered == [['CONSOLE', '', filteredfile, 11]]
# Tests for filtering on just file
filtered = filter_whitelist_errors(whitelist, filteredfile, [['INDENT TABS', filteredfile, '', 12]])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, filteredfile, [['INDENT TABS', filteredfile, '', 11]])
assert filtered == []
filtered = filter_whitelist_errors(whitelist, unfilteredfile, [['INDENT TABS', unfilteredfile, '', 11]])
assert filtered == [['INDENT TABS', unfilteredfile, '', 11]]
def test_parse_whitelist():
input_buffer = six.StringIO("""
# Comment
CR AT EOL: svg/import/*
CR AT EOL: streams/resources/test-utils.js
INDENT TABS: .gitmodules
INDENT TABS: app-uri/*
INDENT TABS: svg/*
TRAILING WHITESPACE: app-uri/*
CONSOLE:streams/resources/test-utils.js: 12
*:*.pdf
*:resources/*
""")
expected_data = {
'.gitmodules': {
'INDENT TABS': {None},
},
'app-uri/*': {
'TRAILING WHITESPACE': {None},
'INDENT TABS': {None},
},
'streams/resources/test-utils.js': {
'CONSOLE': {12},
'CR AT EOL': {None},
},
'svg/*': {
'INDENT TABS': {None},
},
'svg/import/*': {
'CR AT EOL': {None},
},
}
expected_data = {os.path.normcase(p): e for p, e in expected_data.items()}
expected_ignored = {os.path.normcase(x) for x in {"*.pdf", "resources/*"}}
data, ignored = parse_whitelist(input_buffer)
assert data == expected_data
assert ignored == expected_ignored
def test_lint_no_files(capsys):
rv = lint(_dummy_repo, [], False, False)
assert rv == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_lint_ignored_file(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken_ignored.html"], False, False)
assert rv == 0
assert not mocked_check_path.called
assert not mocked_check_file_contents.called
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_lint_not_existing_file(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
# really long path-linted filename
name = "a" * 256 + ".html"
rv = lint(_dummy_repo, [name], False, False)
assert rv == 0
assert not mocked_check_path.called
assert not mocked_check_file_contents.called
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_lint_passing(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["okay.html"], False, False)
assert rv == 0
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_lint_failing(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken.html"], False, False)
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
out, err = capsys.readouterr()
assert "TRAILING WHITESPACE" in out
assert "broken.html 1 " in out
assert err == ""
def test_lint_passing_and_failing(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken.html", "okay.html"], False, False)
assert rv == 1
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2
out, err = capsys.readouterr()
assert "TRAILING WHITESPACE" in out
assert "broken.html 1 " in out
assert "okay.html" not in out
assert err == ""
|
sandeepgupta2k4/tensorflow
|
refs/heads/master
|
tensorflow/contrib/opt/python/training/variable_clipping_optimizer_test.py
|
102
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VariableClippingOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import socket
import numpy as np
from tensorflow.contrib.opt.python.training import variable_clipping_optimizer
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import server_lib
class VariableClippingOptimizerTest(test.TestCase):
def _setupCluster(self):
def get_open_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except IOError:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return port
port1 = get_open_port()
port2 = get_open_port()
cs = server_lib.ClusterSpec({
"worker": ["localhost:%s" % port1],
"ps": ["localhost:%s" % port2]
})
worker = server_lib.Server(cs, job_name="worker", start=True)
ps = server_lib.Server(cs, job_name="ps", start=True)
return worker, ps
@contextlib.contextmanager
def _maybeWithDevice(self, device):
if device is not None:
with ops.device(device):
yield
else:
yield
def _setupDense(self, is_distributed, dtype):
with self._maybeWithDevice("/job:ps" if is_distributed else None):
var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype)
var1 = variables.Variable([4.0, 5.0], dtype=dtype)
with self._maybeWithDevice("/job:worker" if is_distributed else None):
grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd = gradient_descent.GradientDescentOptimizer(3.0)
clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
sgd, {var0: [1]}, 2.0)
update_op = clip_opt.apply_gradients(
list(zip([grads0, grads1], [var0, var1])))
variables.global_variables_initializer().run()
return var0, var1, update_op
def _assertDenseCorrect(self, var0, var1, update_op):
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0]], var0.eval())
self.assertAllCloseAccordingToType([4.0, 5.0], var1.eval())
# Run 1 step of sgd, clipping each var0[i] to max L2-norm 2.0
update_op.run()
# Validate updated params
var0_out = var0.eval()
# var0[0] has norm < 2.0, so it is not clipped.
self.assertAllCloseAccordingToType([(0.0 - 3.0 * 0.1), (1.0 - 3.0 * 0.1)],
var0_out[0])
# var0[1] has norm > 2.0, so it is clipped.
expected_unclipped = np.array([(2.0 - 3.0 * 0.1), (3.0 - 3.0 * 0.1)])
self.assertAllCloseAccordingToType(2.0 * expected_unclipped /
np.linalg.norm(expected_unclipped),
var0_out[1])
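    # Roughly: the unclipped row is [1.7, 2.7] with L2 norm ~3.19, so clipping
    # to a max norm of 2.0 rescales it to about [1.07, 1.69].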
# var1 is not in the var list, so it should not be clipped
self.assertAllCloseAccordingToType([4.0 - 3.0 * 0.01, 5.0 - 3.0 * 0.01],
var1.eval())
def _setupSparse(self, is_distributed, dtype):
with self._maybeWithDevice("/job:ps" if is_distributed else None):
var0 = variables.Variable(
[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], dtype=dtype)
var1 = variables.Variable(
[[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]], dtype=dtype)
with self._maybeWithDevice("/job:worker" if is_distributed else None):
grads = ops.IndexedSlices(
constant_op.constant(
[[0.1, 0.1], [0.1, 0.1]], dtype=dtype), [0, 2], [3, 2])
sgd = gradient_descent.GradientDescentOptimizer(3.0)
clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
sgd, {var0: [1],
var1: [0]}, 2.0)
update_op = clip_opt.apply_gradients(
list(zip([grads, grads], [var0, var1])))
variables.global_variables_initializer().run()
return var0, var1, update_op
def _assertSparseCorrect(self, var0, var1, update_op):
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]],
var0.eval())
self.assertAllCloseAccordingToType([[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]],
var1.eval())
# Run 1 step of sgd
update_op.run()
    # var1 is clipped along the sparse dimension, so it defaults to using dense
# calculations. There should be a warning logged, but the numerics
# should still be correct.
var1_out = var1.eval()
# var1[:, 0] has norm < 2.0, so it is not clipped.
self.assertAllCloseAccordingToType(
[(0.0 - 3.0 * 0.1), 0.0, (0.0 - 3.0 * 0.1)], var1_out[:, 0])
# var1[:, 1] has norm > 2.0, so it is clipped.
expected_unclipped = np.array([(1.0 - 3.0 * 0.1), 3.0, (5.0 - 3.0 * 0.1)])
self.assertAllCloseAccordingToType(2.0 * expected_unclipped /
np.linalg.norm(expected_unclipped),
var1_out[:, 1])
# Validate updated params
var0_out = var0.eval()
# var0[0] has norm < 2.0, so it is not clipped.
self.assertAllCloseAccordingToType([(0.0 - 3.0 * 0.1), (1.0 - 3.0 * 0.1)],
var0_out[0])
# var0[1] has no gradients, so it should remain unchanged.
self.assertAllCloseAccordingToType([2.0, 3.0], var0_out[1])
# var0[2] has norm > 2.0, so it is clipped.
expected_unclipped = np.array([(4.0 - 3.0 * 0.1), (5.0 - 3.0 * 0.1)])
self.assertAllCloseAccordingToType(2.0 * expected_unclipped /
np.linalg.norm(expected_unclipped),
var0_out[2])
def testDenseLocal(self):
for dtype in [dtypes.float32, dtypes.float64, dtypes.half]:
with self.test_session():
var0, var1, update_op = self._setupDense(False, dtype)
self._assertDenseCorrect(var0, var1, update_op)
def testDenseDistributed(self):
worker, unused_ps = self._setupCluster()
for dtype in [dtypes.float64, dtypes.half, dtypes.float32]:
with session.Session(worker.target):
var0, var1, update_op = self._setupDense(True, dtype)
self._assertDenseCorrect(var0, var1, update_op)
def testSparseLocal(self):
for dtype in [dtypes.float64, dtypes.float32, dtypes.half]:
with self.test_session():
var0, var1, update_op = self._setupSparse(False, dtype)
self._assertSparseCorrect(var0, var1, update_op)
def testSparseDistributed(self):
worker, unused_ps = self._setupCluster()
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with session.Session(worker.target):
var0, var1, update_op = self._setupSparse(True, dtype)
self._assertSparseCorrect(var0, var1, update_op)
if __name__ == "__main__":
test.main()
|
linjoahow/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testhelpers.py
|
737
|
import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
def one(self, a, b):
pass
def two(self):
pass
def three(self, a=None):
pass
class AnyTest(unittest.TestCase):
def test_any(self):
self.assertEqual(ANY, object())
mock = Mock()
mock(ANY)
mock.assert_called_with(ANY)
mock = Mock()
mock(foo=ANY)
mock.assert_called_with(foo=ANY)
def test_repr(self):
self.assertEqual(repr(ANY), '<ANY>')
self.assertEqual(str(ANY), '<ANY>')
def test_any_and_datetime(self):
mock = Mock()
mock(datetime.now(), foo=datetime.now())
mock.assert_called_with(ANY, foo=ANY)
def test_any_mock_calls_comparison_order(self):
mock = Mock()
d = datetime.now()
class Foo(object):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
for d in datetime.now(), Foo():
mock.reset_mock()
mock(d, foo=d, bar=d)
mock.method(d, zinga=d, alpha=d)
mock().method(a1=d, z99=d)
expected = [
call(ANY, foo=ANY, bar=ANY),
call.method(ANY, zinga=ANY, alpha=ANY),
call(), call().method(a1=ANY, z99=ANY)
]
self.assertEqual(expected, mock.mock_calls)
self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
def test_call_with_call(self):
kall = _Call()
self.assertEqual(kall, _Call())
self.assertEqual(kall, _Call(('',)))
self.assertEqual(kall, _Call(((),)))
self.assertEqual(kall, _Call(({},)))
self.assertEqual(kall, _Call(('', ())))
self.assertEqual(kall, _Call(('', {})))
self.assertEqual(kall, _Call(('', (), {})))
self.assertEqual(kall, _Call(('foo',)))
self.assertEqual(kall, _Call(('bar', ())))
self.assertEqual(kall, _Call(('baz', {})))
self.assertEqual(kall, _Call(('spam', (), {})))
kall = _Call(((1, 2, 3),))
self.assertEqual(kall, _Call(((1, 2, 3),)))
self.assertEqual(kall, _Call(('', (1, 2, 3))))
self.assertEqual(kall, _Call(((1, 2, 3), {})))
self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(((1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(('foo', (1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
kall = _Call(({'a': 3},))
self.assertEqual(kall, _Call(('', (), {'a': 3})))
self.assertEqual(kall, _Call(('', {'a': 3})))
self.assertEqual(kall, _Call(((), {'a': 3})))
self.assertEqual(kall, _Call(({'a': 3},)))
def test_empty__Call(self):
args = _Call()
self.assertEqual(args, ())
self.assertEqual(args, ('foo',))
self.assertEqual(args, ((),))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertEqual(args, ({},))
def test_named_empty_call(self):
args = _Call(('foo', (), {}))
self.assertEqual(args, ('foo',))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertNotEqual(args, ((),))
self.assertNotEqual(args, ())
self.assertNotEqual(args, ({},))
self.assertNotEqual(args, ('bar',))
self.assertNotEqual(args, ('bar', ()))
self.assertNotEqual(args, ('bar', {}))
def test_call_with_args(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3),))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3), {}))
def test_named_call_with_args(self):
args = _Call(('foo', (1, 2, 3), {}))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertNotEqual(args, ((1, 2, 3),))
self.assertNotEqual(args, ((1, 2, 3), {}))
def test_call_with_kwargs(self):
args = _Call(((), dict(a=3, b=4)))
self.assertEqual(args, (dict(a=3, b=4),))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ((), dict(a=3, b=4)))
def test_named_call_with_kwargs(self):
args = _Call(('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertNotEqual(args, (dict(a=3, b=4),))
self.assertNotEqual(args, ((), dict(a=3, b=4)))
def test_call_with_args_call_empty_name(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, call(1, 2, 3))
self.assertEqual(call(1, 2, 3), args)
self.assertTrue(call(1, 2, 3) in [args])
def test_call_ne(self):
self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
def test_call_non_tuples(self):
kall = _Call(((1, 2, 3),))
for value in 1, None, self, int:
self.assertNotEqual(kall, value)
self.assertFalse(kall == value)
def test_repr(self):
self.assertEqual(repr(_Call()), 'call()')
self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
"call(1, 2, 3, a='b')")
self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
"call.bar(1, 2, 3, a='b')")
self.assertEqual(repr(call), 'call')
self.assertEqual(str(call), 'call')
self.assertEqual(repr(call()), 'call()')
self.assertEqual(repr(call(1)), 'call(1)')
self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
self.assertEqual(repr(call().foo), 'call().foo')
self.assertEqual(repr(call(1).foo.bar(a=3).bing),
'call().foo.bar().bing')
self.assertEqual(
repr(call().foo(1, 2, a=3)),
"call().foo(1, 2, a=3)"
)
self.assertEqual(repr(call()()), "call()()")
self.assertEqual(repr(call(1)(2)), "call()(2)")
self.assertEqual(
repr(call()().bar().baz.beep(1)),
"call()().bar().baz.beep(1)"
)
def test_call(self):
self.assertEqual(call(), ('', (), {}))
self.assertEqual(call('foo', 'bar', one=3, two=4),
('', ('foo', 'bar'), {'one': 3, 'two': 4}))
mock = Mock()
mock(1, 2, 3)
mock(a=3, b=6)
self.assertEqual(mock.call_args_list,
[call(1, 2, 3), call(a=3, b=6)])
def test_attribute_call(self):
self.assertEqual(call.foo(1), ('foo', (1,), {}))
self.assertEqual(call.bar.baz(fish='eggs'),
('bar.baz', (), {'fish': 'eggs'}))
mock = Mock()
mock.foo(1, 2 ,3)
mock.bar.baz(a=3, b=6)
self.assertEqual(mock.method_calls,
[call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
def test_extended_call(self):
result = call(1).foo(2).bar(3, a=4)
self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
mock = MagicMock()
mock(1, 2, a=3, b=4)
self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
self.assertNotEqual(mock.call_args, call(1, 2, 3))
self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
mock = MagicMock()
mock.foo(1).bar()().baz.beep(a=6)
last_call = call.foo(1).bar()().baz.beep(a=6)
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock.mock_calls, last_call.call_list())
def test_call_list(self):
mock = MagicMock()
mock(1)
self.assertEqual(call(1).call_list(), mock.mock_calls)
mock = MagicMock()
mock(1).method(2)
self.assertEqual(call(1).method(2).call_list(),
mock.mock_calls)
mock = MagicMock()
mock(1).method(2)(3)
self.assertEqual(call(1).method(2)(3).call_list(),
mock.mock_calls)
mock = MagicMock()
int(mock(1).method(2)(3).foo.bar.baz(4)(5))
kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
self.assertEqual(kall.call_list(), mock.mock_calls)
def test_call_any(self):
self.assertEqual(call, ANY)
m = MagicMock()
int(m)
self.assertEqual(m.mock_calls, [ANY])
self.assertEqual([ANY], m.mock_calls)
def test_two_args_call(self):
args = _Call(((1, 2), {'a': 3}), two=True)
self.assertEqual(len(args), 2)
self.assertEqual(args[0], (1, 2))
self.assertEqual(args[1], {'a': 3})
other_args = _Call(((1, 2), {'a': 3}))
self.assertEqual(args, other_args)
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
for spec in (SomeClass, SomeClass()):
mock = create_autospec(spec)
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f():
pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo):
pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_unbound_methods(self):
# see mock issue 128
# this is expected to fail until the issue is fixed
return
class Foo(object):
def foo(self):
pass
klass = create_autospec(Foo)
instance = klass()
self.assertRaises(TypeError, instance.foo, 1)
# Note: no type checking on the "self" parameter
klass.foo(1)
klass.foo.assert_called_with(1)
self.assertRaises(TypeError, klass.foo)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None):
pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a):
pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
        # because spec as a list of strings in the mock constructor means
        # something very different, we treat a list instance as the type.
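        # (Passing spec=['append', 'extend'] to Mock() would only restrict the
        # allowed attribute names, whereas create_autospec([]) specs the mock
        # against the list type itself.)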
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg):
pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b):
pass
@staticmethod
def g(a, b):
pass
class Bar(Foo):
pass
class Baz(SomeClass, Bar):
pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self):
pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self):
pass
class Bar(object):
def f(self):
pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a()
this_mock.a.assert_called_with()
self.assertRaises(TypeError, this_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a()
instance_mock.a.assert_called_with()
self.assertRaises(TypeError, instance_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
        # The return value isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f()
instance_mock.Bar.f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f()
instance_mock.Bar().f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b):
pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
s = create_autospec(RaiserClass)
self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
s.existing(1, 2)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3):
pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock(4, 5)
mock.assert_called_with(4, 5)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self):
pass
def __call__(self, a):
pass
mock = create_autospec(Callable)
mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
instance = mock()
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
mock = create_autospec(Callable())
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self):
pass
a = create_autospec(Foo)
a.f(self=10)
a.f.assert_called_with(self=10)
def test_autospec_property(self):
class Foo(object):
@property
def foo(self):
return 3
foo = create_autospec(Foo)
mock_property = foo.foo
# no spec on properties
self.assertTrue(isinstance(mock_property, MagicMock))
mock_property(1, 2, 3)
mock_property.abc(4, 5, 6)
mock_property.assert_called_once_with(1, 2, 3)
mock_property.abc.assert_called_once_with(4, 5, 6)
def test_autospec_slots(self):
class Foo(object):
__slots__ = ['a']
foo = create_autospec(Foo)
mock_slot = foo.a
# no spec on slots
mock_slot(1, 2, 3)
mock_slot.abc(4, 5, 6)
mock_slot.assert_called_once_with(1, 2, 3)
mock_slot.abc.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
mock = Mock()
self.assertIsInstance(mock.call_args_list, _CallList)
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
self.assertTrue(kall in mock.call_args_list)
calls = [call(a=3), call(3, 4)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(1, 2), call(a=3)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(3, 4), call(b=6)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(3, 4)]
self.assertTrue(calls in mock.call_args_list)
self.assertFalse(call('fish') in mock.call_args_list)
self.assertFalse([call('fish')] in mock.call_args_list)
def test_call_list_str(self):
mock = Mock()
mock(1, 2)
mock.foo(a=3)
mock.foo.bar().baz('fish', cat='dog')
expected = (
"[call(1, 2),\n"
" call.foo(a=3),\n"
" call.foo.bar(),\n"
" call.foo.bar().baz('fish', cat='dog')]"
)
self.assertEqual(str(mock.mock_calls), expected)
def test_propertymock(self):
p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
mock = p.start()
try:
SomeClass.one
mock.assert_called_once_with()
s = SomeClass()
s.one
mock.assert_called_with()
self.assertEqual(mock.mock_calls, [call(), call()])
s.one = 3
self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
finally:
p.stop()
def test_propertymock_returnvalue(self):
m = MagicMock()
p = PropertyMock()
type(m).foo = p
returned = m.foo
p.assert_called_once_with()
self.assertIsInstance(returned, MagicMock)
self.assertNotIsInstance(returned, PropertyMock)
if __name__ == '__main__':
unittest.main()
|
zenlambda/pip
|
refs/heads/develop
|
tests/functional/test_requests.py
|
58
|
import pytest
@pytest.mark.skipif
def test_timeout(script):
result = script.pip(
"--timeout", "0.01", "install", "-vvv", "INITools",
expect_error=True,
)
assert (
"Could not fetch URL https://pypi.python.org/simple/INITools/: "
"timed out" in result.stdout
)
assert (
"Could not fetch URL https://pypi.python.org/simple/: "
"timed out" in result.stdout
)
|
gnowgi/gnowsys-studio
|
refs/heads/master
|
gstudio/feeds.py
|
3
|
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Feeds for Gstudio"""
from urlparse import urljoin
from BeautifulSoup import BeautifulSoup
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext as _
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import NoReverseMatch
from django.core.exceptions import ObjectDoesNotExist
from tagging.models import Tag
from tagging.models import TaggedItem
from gstudio.models import Nodetype
from gstudio.settings import COPYRIGHT
from gstudio.settings import PROTOCOL
from gstudio.settings import FEEDS_FORMAT
from gstudio.settings import FEEDS_MAX_ITEMS
from gstudio.managers import nodetypes_published
from gstudio.views.metatypes import get_metatype_or_404
from gstudio.templatetags.gstudio_tags import get_gravatar
class GstudioFeed(Feed):
"""Base Feed for Gstudio"""
feed_copyright = COPYRIGHT
def __init__(self):
self.site = Site.objects.get_current()
self.site_url = '%s://%s' % (PROTOCOL, self.site.domain)
if FEEDS_FORMAT == 'atom':
self.feed_type = Atom1Feed
self.subtitle = self.description
class NodetypeFeed(GstudioFeed):
"""Base Nodetype Feed"""
title_template = 'feeds/nodetype_title.html'
description_template = 'feeds/nodetype_description.html'
def item_pubdate(self, item):
"""Publication date of a nodetype"""
return item.creation_date
def item_metatypes(self, item):
"""Nodetype's metatypes"""
return [metatype.title for metatype in item.metatypes.all()]
def item_author_name(self, item):
"""Returns the first author of a nodetype"""
if item.authors.count():
self.item_author = item.authors.all()[0]
return self.item_author.username
def item_author_email(self, item):
"""Returns the first author's email"""
return self.item_author.email
def item_author_link(self, item):
"""Returns the author's URL"""
try:
author_url = reverse('gstudio_author_detail',
args=[self.item_author.username])
return self.site_url + author_url
except NoReverseMatch:
return self.site_url
def item_enclosure_url(self, item):
"""Returns an image for enclosure"""
if item.image:
return item.image.url
img = BeautifulSoup(item.html_content).find('img')
if img:
return urljoin(self.site_url, img['src'])
def item_enclosure_length(self, item):
"""Hardcoded enclosure length"""
return '100000'
def item_enclosure_mime_type(self, item):
"""Hardcoded enclosure mimetype"""
return 'image/jpeg'
class LatestNodetypes(NodetypeFeed):
"""Feed for the latest nodetypes"""
def link(self):
"""URL of latest nodetypes"""
return reverse('gstudio_nodetype_archive_index')
def items(self):
"""Items are published nodetypes"""
return Nodetype.published.all()[:FEEDS_MAX_ITEMS]
def title(self):
"""Title of the feed"""
return '%s - %s' % (self.site.name, _('Latest nodetypes'))
def description(self):
"""Description of the feed"""
return _('The latest nodetypes for the site %s') % self.site.name
class MetatypeNodetypes(NodetypeFeed):
"""Feed filtered by a metatype"""
def get_object(self, request, path):
"""Retrieve the metatype by his path"""
return get_metatype_or_404(path)
def items(self, obj):
"""Items are the published nodetypes of the metatype"""
return obj.nodetypes_published()[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the metatype"""
return obj.get_absolute_url()
def title(self, obj):
"""Title of the feed"""
return _('Nodetypes for the metatype %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest nodetypes for the metatype %s') % obj.title
class AuthorNodetypes(NodetypeFeed):
"""Feed filtered by an author"""
def get_object(self, request, username):
"""Retrieve the author by his username"""
return get_object_or_404(User, username=username)
def items(self, obj):
"""Items are the published nodetypes of the author"""
return nodetypes_published(obj.nodetypes)[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the author"""
return reverse('gstudio_author_detail', args=[obj.username])
def title(self, obj):
"""Title of the feed"""
return _('Nodetypes for author %s') % obj.username
def description(self, obj):
"""Description of the feed"""
return _('The latest nodetypes by %s') % obj.username
class TagNodetypes(NodetypeFeed):
"""Feed filtered by a tag"""
def get_object(self, request, slug):
"""Retrieve the tag by his name"""
return get_object_or_404(Tag, name=slug)
def items(self, obj):
"""Items are the published nodetypes of the tag"""
return TaggedItem.objects.get_by_model(
Nodetype.published.all(), obj)[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the tag"""
return reverse('gstudio_tag_detail', args=[obj.name])
def title(self, obj):
"""Title of the feed"""
return _('Nodetypes for the tag %s') % obj.name
def description(self, obj):
"""Description of the feed"""
return _('The latest nodetypes for the tag %s') % obj.name
class SearchNodetypes(NodetypeFeed):
"""Feed filtered by a search pattern"""
def get_object(self, request):
"""The GET parameter 'pattern' is the object"""
pattern = request.GET.get('pattern', '')
if len(pattern) < 3:
raise ObjectDoesNotExist
return pattern
def items(self, obj):
"""Items are the published nodetypes founds"""
return Nodetype.published.search(obj)[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the search request"""
return '%s?pattern=%s' % (reverse('gstudio_nodetype_search'), obj)
def title(self, obj):
"""Title of the feed"""
return _("Results of the search for '%s'") % obj
def description(self, obj):
"""Description of the feed"""
return _("The nodetypes containing the pattern '%s'") % obj
class NodetypeDiscussions(GstudioFeed):
"""Feed for discussions in a nodetype"""
title_template = 'feeds/discussion_title.html'
description_template = 'feeds/discussion_description.html'
def get_object(self, request, year, month, day, slug):
"""Retrieve the discussions by nodetype's slug"""
return get_object_or_404(Nodetype.published, slug=slug,
creation_date__year=year,
creation_date__month=month,
creation_date__day=day)
def items(self, obj):
"""Items are the discussions on the nodetype"""
return obj.discussions[:FEEDS_MAX_ITEMS]
def item_pubdate(self, item):
"""Publication date of a discussion"""
return item.submit_date
def item_link(self, item):
"""URL of the discussion"""
return item.get_absolute_url()
def link(self, obj):
"""URL of the nodetype"""
return obj.get_absolute_url()
def item_author_name(self, item):
"""Author of the discussion"""
return item.userinfo['name']
def item_author_email(self, item):
"""Author's email of the discussion"""
return item.userinfo['email']
def item_author_link(self, item):
"""Author's URL of the discussion"""
return item.userinfo['url']
def title(self, obj):
"""Title of the feed"""
return _('Discussions on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest discussions for the nodetype %s') % obj.title
class NodetypeComments(NodetypeDiscussions):
"""Feed for comments in a nodetype"""
title_template = 'feeds/comment_title.html'
description_template = 'feeds/comment_description.html'
def items(self, obj):
"""Items are the comments on the nodetype"""
return obj.comments[:FEEDS_MAX_ITEMS]
def item_link(self, item):
"""URL of the comment"""
return item.get_absolute_url('#comment_%(id)s')
def title(self, obj):
"""Title of the feed"""
return _('Comments on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest comments for the nodetype %s') % obj.title
def item_enclosure_url(self, item):
"""Returns a gravatar image for enclosure"""
return get_gravatar(item.userinfo['email'])
def item_enclosure_length(self, item):
"""Hardcoded enclosure length"""
return '100000'
def item_enclosure_mime_type(self, item):
"""Hardcoded enclosure mimetype"""
return 'image/jpeg'
class NodetypePingbacks(NodetypeDiscussions):
"""Feed for pingbacks in a nodetype"""
title_template = 'feeds/pingback_title.html'
description_template = 'feeds/pingback_description.html'
def items(self, obj):
"""Items are the pingbacks on the nodetype"""
return obj.pingbacks[:FEEDS_MAX_ITEMS]
def item_link(self, item):
"""URL of the pingback"""
return item.get_absolute_url('#pingback_%(id)s')
def title(self, obj):
"""Title of the feed"""
return _('Pingbacks on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest pingbacks for the nodetype %s') % obj.title
class NodetypeTrackbacks(NodetypeDiscussions):
"""Feed for trackbacks in a nodetype"""
title_template = 'feeds/trackback_title.html'
description_template = 'feeds/trackback_description.html'
def items(self, obj):
"""Items are the trackbacks on the nodetype"""
return obj.trackbacks[:FEEDS_MAX_ITEMS]
def item_link(self, item):
"""URL of the trackback"""
return item.get_absolute_url('#trackback_%(id)s')
def title(self, obj):
"""Title of the feed"""
return _('Trackbacks on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest trackbacks for the nodetype %s') % obj.title
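# Usage sketch (not part of the original module): these feed classes are meant to
# be wired into a Django URLconf. The patterns and names below are illustrative
# assumptions, not gstudio's actual urls.py.
#
#     from django.conf.urls import url
#     from gstudio.feeds import LatestNodetypes, TagNodetypes
#
#     urlpatterns = [
#         url(r'^feeds/latest/$', LatestNodetypes(), name='latest_nodetypes_feed'),
#         url(r'^feeds/tags/(?P<slug>[^/]+)/$', TagNodetypes(), name='tag_nodetypes_feed'),
#     ]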
|
awsdocs/aws-doc-sdk-examples
|
refs/heads/master
|
python/example_code/rekognition/rekognition_video_detection.py
|
1
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Rekognition to
recognize people and objects in videos.
"""
import logging
import json
from pprint import pprint
import time
import boto3
from botocore.exceptions import ClientError
import requests
from rekognition_objects import (
RekognitionFace, RekognitionCelebrity, RekognitionLabel,
RekognitionModerationLabel, RekognitionPerson)
logger = logging.getLogger(__name__)
class RekognitionVideo:
"""
Encapsulates an Amazon Rekognition video. This class is a thin wrapper around
parts of the Boto3 Amazon Rekognition API.
"""
def __init__(self, video, video_name, rekognition_client):
"""
Initializes the video object.
:param video: Amazon S3 bucket and object key data where the video is located.
:param video_name: The name of the video.
:param rekognition_client: A Boto3 Rekognition client.
"""
self.video = video
self.video_name = video_name
self.rekognition_client = rekognition_client
self.topic = None
self.queue = None
self.role = None
@classmethod
def from_bucket(cls, s3_object, rekognition_client):
"""
Creates a RekognitionVideo object from an Amazon S3 object.
:param s3_object: An Amazon S3 object that contains the video. The video
is not retrieved until needed for a later call.
:param rekognition_client: A Boto3 Rekognition client.
:return: The RekognitionVideo object, initialized with Amazon S3 object data.
"""
video = {'S3Object': {'Bucket': s3_object.bucket_name, 'Name': s3_object.key}}
return cls(video, s3_object.key, rekognition_client)
def create_notification_channel(
self, resource_name, iam_resource, sns_resource, sqs_resource):
"""
Creates a notification channel used by Amazon Rekognition to notify subscribers
that a detection job has completed. The notification channel consists of an
Amazon SNS topic and an Amazon SQS queue that is subscribed to the topic.
After a job is started, the queue is polled for a job completion message.
Amazon Rekognition publishes a message to the topic when a job completes,
which triggers Amazon SNS to send a message to the subscribing queue.
As part of creating the notification channel, an AWS Identity and Access
Management (IAM) role and policy are also created. This role allows Amazon
Rekognition to publish to the topic.
:param resource_name: The name to give to the channel resources that are
created.
:param iam_resource: A Boto3 IAM resource.
:param sns_resource: A Boto3 SNS resource.
:param sqs_resource: A Boto3 SQS resource.
"""
self.topic = sns_resource.create_topic(Name=resource_name)
self.queue = sqs_resource.create_queue(
QueueName=resource_name, Attributes={'ReceiveMessageWaitTimeSeconds': '5'})
queue_arn = self.queue.attributes['QueueArn']
# This policy lets the queue receive messages from the topic.
self.queue.set_attributes(Attributes={'Policy': json.dumps({
'Version': '2008-10-17',
'Statement': [{
'Sid': 'test-sid',
'Effect': 'Allow',
'Principal': {'AWS': '*'},
'Action': 'SQS:SendMessage',
'Resource': queue_arn,
'Condition': {'ArnEquals': {'aws:SourceArn': self.topic.arn}}}]})})
self.topic.subscribe(Protocol='sqs', Endpoint=queue_arn)
# This role lets Amazon Rekognition publish to the topic. Its Amazon Resource
# Name (ARN) is sent each time a job is started.
self.role = iam_resource.create_role(
RoleName=resource_name,
AssumeRolePolicyDocument=json.dumps({
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {'Service': 'rekognition.amazonaws.com'},
'Action': 'sts:AssumeRole'
}
]
})
)
policy = iam_resource.create_policy(
PolicyName=resource_name,
PolicyDocument=json.dumps({
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': 'SNS:Publish',
'Resource': self.topic.arn
}
]
})
)
self.role.attach_policy(PolicyArn=policy.arn)
def get_notification_channel(self):
"""
Gets the role and topic ARNs that define the notification channel.
:return: The notification channel data.
"""
return {'RoleArn': self.role.arn, 'SNSTopicArn': self.topic.arn}
def delete_notification_channel(self):
"""
Deletes all of the resources created for the notification channel.
"""
for policy in self.role.attached_policies.all():
self.role.detach_policy(PolicyArn=policy.arn)
policy.delete()
self.role.delete()
logger.info("Deleted role %s.", self.role.role_name)
self.role = None
self.queue.delete()
logger.info("Deleted queue %s.", self.queue.url)
self.queue = None
self.topic.delete()
logger.info("Deleted topic %s.", self.topic.arn)
self.topic = None
def poll_notification(self, job_id):
"""
Polls the notification queue for messages that indicate a job has completed.
:param job_id: The ID of the job to wait for.
:return: The completion status of the job.
"""
status = None
job_done = False
while not job_done:
messages = self.queue.receive_messages(
MaxNumberOfMessages=1, WaitTimeSeconds=5)
logger.info("Polled queue for messages, got %s.", len(messages))
if messages:
body = json.loads(messages[0].body)
message = json.loads(body['Message'])
if job_id != message['JobId']:
raise RuntimeError
status = message['Status']
logger.info("Got message %s with status %s.", message['JobId'], status)
messages[0].delete()
job_done = True
return status
def _start_rekognition_job(self, job_description, start_job_func):
"""
Starts a job by calling the specified job function.
:param job_description: A description to log about the job.
:param start_job_func: The specific Boto3 Rekognition start job function to
call, such as start_label_detection.
:return: The ID of the job.
"""
try:
response = start_job_func(
Video=self.video, NotificationChannel=self.get_notification_channel())
job_id = response['JobId']
logger.info(
"Started %s job %s on %s.", job_description, job_id, self.video_name)
except ClientError:
logger.exception(
"Couldn't start %s job on %s.", job_description, self.video_name)
raise
else:
return job_id
def _get_rekognition_job_results(self, job_id, get_results_func, result_extractor):
"""
Gets the results of a completed job by calling the specified results function.
Results are extracted into objects by using the specified extractor function.
:param job_id: The ID of the job.
:param get_results_func: The specific Boto3 Rekognition get job results
function to call, such as get_label_detection.
:param result_extractor: A function that takes the results of the job
and wraps the result data in object form.
:return: The list of result objects.
"""
try:
response = get_results_func(JobId=job_id)
logger.info("Job %s has status: %s.", job_id, response['JobStatus'])
results = result_extractor(response)
logger.info("Found %s items in %s.", len(results), self.video_name)
except ClientError:
logger.exception("Couldn't get items for %s.", job_id)
raise
else:
return results
def _do_rekognition_job(
self, job_description, start_job_func, get_results_func, result_extractor):
"""
Starts a job, waits for completion, and gets the results.
:param job_description: The description of the job.
:param start_job_func: The Boto3 start job function to call.
:param get_results_func: The Boto3 get job results function to call.
:param result_extractor: A function that can extract the results into objects.
:return: The list of result objects.
"""
job_id = self._start_rekognition_job(job_description, start_job_func)
status = self.poll_notification(job_id)
if status == 'SUCCEEDED':
results = self._get_rekognition_job_results(
job_id, get_results_func, result_extractor)
else:
results = []
return results
def do_label_detection(self):
"""
Performs label detection on the video.
:return: The list of labels found in the video.
"""
return self._do_rekognition_job(
"label detection",
self.rekognition_client.start_label_detection,
self.rekognition_client.get_label_detection,
lambda response: [
RekognitionLabel(label['Label'], label['Timestamp']) for label in
response['Labels']])
def do_face_detection(self):
"""
Performs face detection on the video.
:return: The list of faces found in the video.
"""
return self._do_rekognition_job(
"face detection",
self.rekognition_client.start_face_detection,
self.rekognition_client.get_face_detection,
lambda response: [
RekognitionFace(face['Face'], face['Timestamp']) for face in
response['Faces']])
def do_person_tracking(self):
"""
Performs person tracking in the video. Person tracking assigns IDs to each
person detected in the video and each detection event is associated with
one of the IDs.
:return: The list of person tracking events found in the video.
"""
return self._do_rekognition_job(
"person tracking",
self.rekognition_client.start_person_tracking,
self.rekognition_client.get_person_tracking,
lambda response: [
RekognitionPerson(person['Person'], person['Timestamp']) for person in
response['Persons']])
def do_celebrity_recognition(self):
"""
Performs celebrity detection on the video.
:return: The list of celebrity detection events found in the video.
"""
return self._do_rekognition_job(
"celebrity recognition",
self.rekognition_client.start_celebrity_recognition,
self.rekognition_client.get_celebrity_recognition,
lambda response: [
RekognitionCelebrity(celeb['Celebrity'], celeb['Timestamp'])
for celeb in response['Celebrities']])
def do_content_moderation(self):
"""
Performs content moderation on the video.
:return: The list of moderation labels found in the video.
"""
return self._do_rekognition_job(
"content moderation",
self.rekognition_client.start_content_moderation,
self.rekognition_client.get_content_moderation,
lambda response: [
RekognitionModerationLabel(label['ModerationLabel'], label['Timestamp'])
for label in response['ModerationLabels']])
def usage_demo():
print('-'*88)
print("Welcome to the Amazon Rekognition video detection demo!")
print('-'*88)
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
print("Creating Amazon S3 bucket and uploading video.")
s3_resource = boto3.resource('s3')
bucket = s3_resource.create_bucket(
Bucket=f'doc-example-bucket-rekognition-{time.time_ns()}',
CreateBucketConfiguration={
'LocationConstraint': s3_resource.meta.client.meta.region_name
})
video_object = bucket.Object('bezos_vogel.mp4')
bezos_vogel_video = requests.get(
'https://dhei5unw3vrsx.cloudfront.net/videos/bezos_vogel.mp4', stream=True)
video_object.upload_fileobj(bezos_vogel_video.raw)
rekognition_client = boto3.client('rekognition')
video = RekognitionVideo.from_bucket(video_object, rekognition_client)
print("Creating notification channel from Amazon Rekognition to Amazon SQS.")
iam_resource = boto3.resource('iam')
sns_resource = boto3.resource('sns')
sqs_resource = boto3.resource('sqs')
video.create_notification_channel(
'doc-example-video-rekognition', iam_resource, sns_resource, sqs_resource)
print("Detecting labels in the video.")
labels = video.do_label_detection()
print(f"Detected {len(labels)} labels, here are the first twenty:")
for label in labels[:20]:
pprint(label.to_dict())
input("Press Enter when you're ready to continue.")
print("Detecting faces in the video.")
faces = video.do_face_detection()
print(f"Detected {len(faces)} faces, here are the first ten:")
for face in faces[:10]:
pprint(face.to_dict())
input("Press Enter when you're ready to continue.")
print("Detecting celebrities in the video.")
celebrities = video.do_celebrity_recognition()
print(f"Found {len(celebrities)} celebrity detection events. Here's the first "
f"appearance of each celebrity:")
celeb_names = set()
for celeb in celebrities:
if celeb.name not in celeb_names:
celeb_names.add(celeb.name)
pprint(celeb.to_dict())
input("Press Enter when you're ready to continue.")
print("Tracking people in the video. This takes a little longer. Be patient!")
persons = video.do_person_tracking()
print(f"Detected {len(persons)} person tracking items, here are the first five "
f"for each person:")
by_index = {}
for person in persons:
if person.index not in by_index:
by_index[person.index] = []
by_index[person.index].append(person)
for items in by_index.values():
for item in items[:5]:
pprint(item.to_dict())
input("Press Enter when you're ready to continue.")
print("Deleting resources created for the demo.")
video.delete_notification_channel()
bucket.objects.delete()
bucket.delete()
logger.info("Deleted bucket %s.", bucket.name)
print("All resources cleaned up. Thanks for watching!")
print('-'*88)
if __name__ == '__main__':
usage_demo()
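# Minimal programmatic sketch (not part of the original example; assumes AWS
# credentials are configured and that the bucket and key below already exist --
# both names are placeholders):
#
#     s3_object = boto3.resource('s3').Object('my-example-bucket', 'my-video.mp4')
#     rekognition_client = boto3.client('rekognition')
#     video = RekognitionVideo.from_bucket(s3_object, rekognition_client)
#     video.create_notification_channel(
#         'my-rekognition-channel', boto3.resource('iam'),
#         boto3.resource('sns'), boto3.resource('sqs'))
#     try:
#         labels = video.do_label_detection()
#         print(f"Found {len(labels)} labels.")
#     finally:
#         video.delete_notification_channel()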
|
e-gob/plataforma-kioscos-autoatencion
|
refs/heads/master
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/module_utils/openshift_common.py
|
96
|
#
# Copyright 2017 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.module_utils.k8s_common import KubernetesAnsibleException, KubernetesAnsibleModule
try:
from openshift.helper.ansible import OpenShiftAnsibleModuleHelper, ARG_ATTRIBUTES_BLACKLIST
from openshift.helper.exceptions import KubernetesException, OpenShiftException
HAS_OPENSHIFT_HELPER = True
except ImportError as exc:
HAS_OPENSHIFT_HELPER = False
class OpenShiftAnsibleException(KubernetesAnsibleException):
pass
class OpenShiftAnsibleModule(KubernetesAnsibleModule):
def __init__(self, kind, api_version):
if not HAS_OPENSHIFT_HELPER:
raise OpenShiftAnsibleException(
"This module requires the OpenShift Python client. Try `pip install openshift`"
)
try:
super(OpenShiftAnsibleModule, self).__init__(kind, api_version)
except KubernetesAnsibleException as exc:
raise OpenShiftAnsibleException(exc.args)
@staticmethod
def get_helper(api_version, kind):
return OpenShiftAnsibleModuleHelper(api_version, kind)
def _create(self, namespace):
if self.kind.lower() == 'project':
return self._create_project()
else:
return super(OpenShiftAnsibleModule, self)._create(namespace)
def _create_project(self):
new_obj = None
k8s_obj = None
try:
new_obj = self.helper.object_from_params(self.params)
except KubernetesException as exc:
self.fail_json(msg="Failed to create object: {}".format(exc.message))
try:
k8s_obj = self.helper.create_project(metadata=new_obj.metadata,
display_name=self.params.get('display_name'),
description=self.params.get('description'))
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object',
error=exc.value.get('status'))
return k8s_obj
|
pridemusvaire/yowsup
|
refs/heads/master
|
yowsup/layers/axolotl/store/sqlite/litesessionstore.py
|
53
|
from axolotl.state.sessionstore import SessionStore
from axolotl.state.sessionrecord import SessionRecord
class LiteSessionStore(SessionStore):
def __init__(self, dbConn):
"""
:type dbConn: Connection
"""
self.dbConn = dbConn
dbConn.execute("CREATE TABLE IF NOT EXISTS sessions (_id INTEGER PRIMARY KEY AUTOINCREMENT,"
"recipient_id INTEGER UNIQUE, device_id INTEGER, record BLOB, timestamp INTEGER);")
def loadSession(self, recipientId, deviceId):
q = "SELECT record FROM sessions WHERE recipient_id = ? AND device_id = ?"
c = self.dbConn.cursor()
c.execute(q, (recipientId, deviceId))
result = c.fetchone()
if result:
return SessionRecord(serialized=result[0])
else:
return SessionRecord()
def getSubDeviceSessions(self, recipientId):
q = "SELECT device_id from sessions WHERE recipient_id = ?"
c = self.dbConn.cursor()
c.execute(q, (recipientId,))
result = c.fetchall()
deviceIds = [r[0] for r in result]
return deviceIds
def storeSession(self, recipientId, deviceId, sessionRecord):
self.deleteSession(recipientId, deviceId)
q = "INSERT INTO sessions(recipient_id, device_id, record) VALUES(?,?,?)"
c = self.dbConn.cursor()
c.execute(q, (recipientId, deviceId, sessionRecord.serialize()))
self.dbConn.commit()
def containsSession(self, recipientId, deviceId):
q = "SELECT record FROM sessions WHERE recipient_id = ? AND device_id = ?"
c = self.dbConn.cursor()
c.execute(q, (recipientId, deviceId))
result = c.fetchone()
return result is not None
def deleteSession(self, recipientId, deviceId):
q = "DELETE FROM sessions WHERE recipient_id = ? AND device_id = ?"
self.dbConn.cursor().execute(q, (recipientId, deviceId))
self.dbConn.commit()
def deleteAllSessions(self, recipientId):
q = "DELETE FROM sessions WHERE recipient_id = ?"
self.dbConn.cursor().execute(q, (recipientId,))
self.dbConn.commit()
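if __name__ == '__main__':
    # Minimal self-test sketch (not part of the original yowsup code): exercises
    # the store against an in-memory SQLite database. The recipient and device
    # ids are arbitrary placeholder values.
    import sqlite3
    conn = sqlite3.connect(':memory:')
    store = LiteSessionStore(conn)
    store.storeSession(12345, 1, SessionRecord())
    assert store.containsSession(12345, 1)
    print(store.getSubDeviceSessions(12345))
    store.deleteAllSessions(12345)
    assert not store.containsSession(12345, 1)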
|
SijmeJan/Astrix
|
refs/heads/master
|
python/astrix/parameterfile.py
|
1
|
#!/usr/bin/python
import os
def ChangeParameter(inFileName, parameter):
"""Edit a valid Astrix input file to change parameters
Given a valid Astrix input file inFileName, edit it to change the parameters as listed in parameter
:param inFileName: Valid Astrix input parameter file.
:param parameter: List of pairs of strings [parameterName, parameterValue]
:type inFileName: string
:type parameter: List of string pairs
"""
fullPath = os.path.abspath(inFileName)
direc = fullPath.rsplit("/",1)
outFileName = direc[0] + '/temp.in'
# Open input file
inFile = open(inFileName, "r")
    # Open output file
outFile = open(outFileName, "w")
# Process file line by line
for line in inFile:
# By default, just copy line
lineNew = line
for p in parameter:
if(p[0] in line):
s = list(line)
v = list(p[1])
foundSpace = False
written = False
j = -1
for i in range(0, len(s)):
if(s[i] == ' ' or s[i] == '\t'):
foundSpace = True
if(s[i] != ' ' and s[i] != '\t' and
foundSpace == True and written == False):
j = i
foundSpace = False
if(j >= 0):
written = True
if(i - j < len(v)):
s[i] = v[i - j]
if(i - j >= len(v) and foundSpace == False):
s[i] = ' '
# Join up line from characters
lineNew = "".join(s)
# Write line to output file
outFile.write(lineNew)
# Close all files
inFile.close()
outFile.close()
# Replace old input file with new
os.remove(inFileName)
os.rename(outFileName, inFileName)
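if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): 'astrix.in' and the
    # parameter names below are hypothetical placeholders, not Astrix defaults.
    exampleFile = 'astrix.in'
    if os.path.exists(exampleFile):
        ChangeParameter(exampleFile, [['courantNumber', '0.5'],
                                      ['maxSimulationTime', '10.0']])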
|
realpython/flask-jwt-auth
|
refs/heads/master
|
project/server/config.py
|
4
|
# project/server/config.py
import os
basedir = os.path.abspath(os.path.dirname(__file__))
postgres_local_base = 'postgresql://postgres:@localhost/'
database_name = 'flask_jwt_auth'
class BaseConfig:
"""Base configuration."""
SECRET_KEY = os.getenv('SECRET_KEY', 'my_precious')
DEBUG = False
BCRYPT_LOG_ROUNDS = 13
SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(BaseConfig):
"""Development configuration."""
DEBUG = True
BCRYPT_LOG_ROUNDS = 4
SQLALCHEMY_DATABASE_URI = postgres_local_base + database_name
class TestingConfig(BaseConfig):
"""Testing configuration."""
DEBUG = True
TESTING = True
BCRYPT_LOG_ROUNDS = 4
SQLALCHEMY_DATABASE_URI = postgres_local_base + database_name + '_test'
PRESERVE_CONTEXT_ON_EXCEPTION = False
class ProductionConfig(BaseConfig):
"""Production configuration."""
SECRET_KEY = 'my_precious'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql:///example'
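# Usage sketch (not part of the original file): the application factory would
# typically select one of these classes through an environment variable, e.g.
#
#     app.config.from_object(os.getenv(
#         'APP_SETTINGS', 'project.server.config.DevelopmentConfig'))
#
# The 'APP_SETTINGS' variable name is an assumption about the project layout,
# not something defined in this module.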
|
SKA-ScienceDataProcessor/algorithm-reference-library
|
refs/heads/master
|
cluster_tests/ritoy-numba/cluster_test_ritoy_numba.py
|
1
|
# """ Radio interferometry toy
#
# This mimics the overall structure and workload of our processing.
#
# Tim Cornwell 9 Sept 2017
# realtimcornwell@gmail.com
# Adding Numba Testing, ^W_F^
# """
import numpy
from dask import delayed
from distributed import Client
import numba
# Make some randomly located points on 2D plane
@numba.jit('f8[:,:](i8,f8)',nopython=True)
def init_sparse_pre(n, margin):
numpy.random.seed(8753193)
# a = numpy.array([numpy.random.uniform(margin, 1.0 - margin, n),
# numpy.random.uniform(margin, 1.0 - margin, n)]).reshape([n, 2])
a = numpy.random.uniform(margin, 1.0 - margin, 2*n)
a = a.astype(numpy.float64)
a = a.reshape(-1,2)
return a
def init_sparse(n, margin=0.1):
return init_sparse_pre(n, margin)
# Put the points onto a grid and FFT
@numba.jit('c16[:,:](f8[:,:])',nopython=True)
def grid_and_invert_data_pre(sparse_data):
shape=[1024, 1024]
grid = numpy.zeros((1024,1024), dtype=numpy.complex128)
loc = numpy.array([1024.,1024.]) * sparse_data
out = numpy.empty_like(loc)
loc = numpy.round_(loc,0,out).astype(numpy.int64)
    for i in range(0, sparse_data.shape[0]):
        # Index the grid with an explicit (row, column) pair; indexing with the
        # whole row array would select entire grid rows rather than one cell.
        grid[loc[i, 0], loc[i, 1]] = 1.0
return(grid)
def grid_and_invert_data(sparse_data, shape):
grid = grid_and_invert_data_pre(sparse_data)
return numpy.fft.fft(grid).real
if __name__ == '__main__':
import sys
import time
start=time.time()
    # Process nchunks chunks, each of len_chunk 2D points, making a PSF of size shape
len_chunk = 16384*8
nchunks = 256*4
nreduce = 16*4
shape=[1024, 1024]
skip = 1
# We pass in the scheduler from the invoking script
if len(sys.argv) > 1:
scheduler = sys.argv[1]
client = Client(scheduler)
else:
client = Client()
sparse_graph_list = [delayed(init_sparse)(len_chunk) for i in range(nchunks)]
psf_graph_list = [delayed(grid_and_invert_data)(s, shape) for s in sparse_graph_list]
sum_psf_graph_rank1 = [delayed(numpy.sum)(psf_graph_list[i:i+nreduce]) for i in range(0, nchunks, nreduce)]
sum_psf_graph = delayed(numpy.sum)(sum_psf_graph_rank1)
future = client.compute(sum_psf_graph)
psf = future.result()
print(numpy.max(psf))
client.close()
print("*** Successfully reached end in %.1f seconds ***" % (time.time() - start))
exit(0)
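# Run sketch (not part of the original script; the scheduler address below is a
# placeholder for whatever `dask-scheduler` prints at start-up):
#
#     dask-scheduler &                        # e.g. tcp://127.0.0.1:8786
#     dask-worker tcp://127.0.0.1:8786 &
#     python cluster_test_ritoy_numba.py tcp://127.0.0.1:8786
#
# With no argument the script falls back to a local in-process Client().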
|
ktaneishi/deepchem
|
refs/heads/master
|
deepchem/models/xgboost_models/__init__.py
|
2
|
"""
Scikit-learn wrapper interface of xgboost
"""
import numpy as np
import os
from deepchem.models import Model
from deepchem.models.sklearn_models import SklearnModel
from deepchem.utils.save import load_from_disk
from deepchem.utils.save import save_to_disk
from sklearn.model_selection import train_test_split, GridSearchCV
import tempfile
class XGBoostModel(SklearnModel):
"""
Abstract base class for XGBoost model.
"""
def __init__(self,
model_instance=None,
model_dir=None,
verbose=False,
**kwargs):
"""Abstract class for XGBoost models.
Parameters:
-----------
model_instance: object
Scikit-learn wrapper interface of xgboost
model_dir: str
Path to directory where model will be stored.
"""
if model_dir is not None:
if not os.path.exists(model_dir):
os.makedirs(model_dir)
else:
model_dir = tempfile.mkdtemp()
self.model_dir = model_dir
self.model_instance = model_instance
self.model_class = model_instance.__class__
self.verbose = verbose
if 'early_stopping_rounds' in kwargs:
self.early_stopping_rounds = kwargs['early_stopping_rounds']
else:
self.early_stopping_rounds = 50
def fit(self, dataset, **kwargs):
"""
Fits XGBoost model to data.
"""
X = dataset.X
y = np.squeeze(dataset.y)
w = np.squeeze(dataset.w)
seed = self.model_instance.seed
import xgboost as xgb
if isinstance(self.model_instance, xgb.XGBClassifier):
xgb_metric = "auc"
sklearn_metric = "roc_auc"
stratify = y
elif isinstance(self.model_instance, xgb.XGBRegressor):
xgb_metric = "mae"
sklearn_metric = "neg_mean_absolute_error"
stratify = None
best_param = self._search_param(sklearn_metric, X, y)
# update model with best param
self.model_instance = self.model_class(**best_param)
# Find optimal n_estimators based on original learning_rate
# and early_stopping_rounds
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=seed, stratify=stratify)
self.model_instance.fit(
X_train,
y_train,
early_stopping_rounds=self.early_stopping_rounds,
eval_metric=xgb_metric,
eval_set=[(X_train, y_train), (X_test, y_test)],
verbose=self.verbose)
    # Since the test size is 20%, when retraining on the whole dataset we expect
    # n_estimators to increase by a factor of 1 / 0.8 = 1.25.
estimated_best_round = np.round(self.model_instance.best_ntree_limit * 1.25)
self.model_instance.n_estimators = np.int64(estimated_best_round)
self.model_instance.fit(X, y, eval_metric=xgb_metric, verbose=self.verbose)
def _search_param(self, metric, X, y):
'''
    Find the best potential parameter set using a small number of n_estimators.
'''
# Make sure user specified params are in the grid.
max_depth_grid = list(np.unique([self.model_instance.max_depth, 5, 7]))
colsample_bytree_grid = list(
np.unique([self.model_instance.colsample_bytree, 0.66, 0.9]))
reg_lambda_grid = list(np.unique([self.model_instance.reg_lambda, 1, 5]))
param_grid = {
'max_depth': max_depth_grid,
'learning_rate': [max(self.model_instance.learning_rate, 0.3)],
'n_estimators': [min(self.model_instance.n_estimators, 60)],
'gamma': [self.model_instance.gamma],
'min_child_weight': [self.model_instance.min_child_weight],
'max_delta_step': [self.model_instance.max_delta_step],
'subsample': [self.model_instance.subsample],
'colsample_bytree': colsample_bytree_grid,
'colsample_bylevel': [self.model_instance.colsample_bylevel],
'reg_alpha': [self.model_instance.reg_alpha],
'reg_lambda': reg_lambda_grid,
'scale_pos_weight': [self.model_instance.scale_pos_weight],
'base_score': [self.model_instance.base_score],
'seed': [self.model_instance.seed]
}
grid_search = GridSearchCV(
self.model_instance, param_grid, cv=2, refit=False, scoring=metric)
grid_search.fit(X, y)
best_params = grid_search.best_params_
    # Change params back to the original values
best_params['learning_rate'] = self.model_instance.learning_rate
best_params['n_estimators'] = self.model_instance.n_estimators
return best_params
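# Usage sketch (not part of the original module; assumes xgboost is installed and
# uses deepchem's NumpyDataset with random data purely for illustration):
#
#     import xgboost as xgb
#     import deepchem as dc
#     X = np.random.rand(200, 16)
#     y = np.random.rand(200)
#     dataset = dc.data.NumpyDataset(X, y)
#     regressor = xgb.XGBRegressor(n_estimators=200, learning_rate=0.05, seed=0)
#     model = XGBoostModel(regressor, verbose=False, early_stopping_rounds=20)
#     model.fit(dataset)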
|
maartenq/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/cloudfront_facts.py
|
22
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_facts
short_description: Obtain facts about an AWS CloudFront distribution
description:
- Gets information about an AWS CloudFront distribution
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.3"
author: Willem van Ketwich (@wilvk)
options:
distribution_id:
description:
- The id of the CloudFront distribution. Used with I(distribution), I(distribution_config),
I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations).
required: false
invalidation_id:
description:
- The id of the invalidation to get information about. Used with I(invalidation).
required: false
origin_access_identity_id:
description:
- The id of the cloudfront origin access identity to get information about.
required: false
web_acl_id:
description:
- Used with I(list_distributions_by_web_acl_id).
required: false
domain_name_alias:
description:
- Can be used instead of I(distribution_id) - uses the aliased CNAME for the cloudfront
distribution to get the distribution id where required.
required: false
all_lists:
description:
- Get all cloudfront lists that do not require parameters.
required: false
default: false
origin_access_identity:
description:
- Get information about an origin access identity. Requires I(origin_access_identity_id)
to be specified.
required: false
default: false
origin_access_identity_config:
description:
- Get the configuration information about an origin access identity. Requires
I(origin_access_identity_id) to be specified.
required: false
default: false
distribution:
description:
- Get information about a distribution. Requires I(distribution_id) or I(domain_name_alias)
to be specified.
required: false
default: false
distribution_config:
description:
- Get the configuration information about a distribution. Requires I(distribution_id)
or I(domain_name_alias) to be specified.
required: false
default: false
invalidation:
description:
- Get information about an invalidation. Requires I(invalidation_id) to be specified.
required: false
default: false
streaming_distribution:
description:
- Get information about a specified RTMP distribution. Requires I(distribution_id) or
I(domain_name_alias) to be specified.
required: false
default: false
streaming_distribution_config:
description:
- Get the configuration information about a specified RTMP distribution.
Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false
default: false
list_origin_access_identities:
description:
          - Get a list of cloudfront origin access identities.
required: false
default: false
list_distributions:
description:
- Get a list of cloudfront distributions.
required: false
default: false
list_distributions_by_web_acl_id:
description:
- Get a list of distributions using web acl id as a filter. Requires I(web_acl_id) to be set.
required: false
default: false
list_invalidations:
description:
- Get a list of invalidations. Requires I(distribution_id) or I(domain_name_alias) to be specified.
required: false
default: false
list_streaming_distributions:
description:
- Get a list of streaming distributions.
required: false
default: false
summary:
description:
- Returns a summary of all distributions, streaming distributions and origin_access_identities.
This is the default behaviour if no option is selected.
required: false
default: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Get a summary of distributions
- cloudfront_facts:
summary: true
# Get information about a distribution
- cloudfront_facts:
distribution: true
distribution_id: my-cloudfront-distribution-id
# Get information about a distribution using the CNAME of the cloudfront distribution.
- cloudfront_facts:
distribution: true
domain_name_alias: www.my-website.com
# Facts are published in ansible_facts['cloudfront'][<distribution_name>]
- debug:
msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}"
- debug:
msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}"
# Get all information about an invalidation for a distribution.
- cloudfront_facts:
invalidation: true
distribution_id: my-cloudfront-distribution-id
invalidation_id: my-cloudfront-invalidation-id
# Get all information about a cloudfront origin access identity.
- cloudfront_facts:
origin_access_identity: true
origin_access_identity_id: my-cloudfront-origin-access-identity-id
# Get all information about lists not requiring parameters (ie. list_origin_access_identities, list_distributions, list_streaming_distributions)
- cloudfront_facts:
all_lists: true
'''
RETURN = '''
origin_access_identity:
description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
returned: only if I(origin_access_identity) is true
type: dict
origin_access_identity_configuration:
description: Describes the origin access identity information configuration information. Requires I(origin_access_identity_id) to be set.
returned: only if I(origin_access_identity_configuration) is true
type: dict
distribution:
description: >
Facts about a cloudfront distribution. Requires I(distribution_id) or I(domain_name_alias)
        to be specified.
returned: only if distribution is true
type: dict
distribution_config:
description: >
Facts about a cloudfront distribution's config. Requires I(distribution_id) or I(domain_name_alias)
to be specified.
returned: only if I(distribution_config) is true
type: dict
invalidation:
description: >
Describes the invalidation information for the distribution. Requires
I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias.)
returned: only if invalidation is true
type: dict
streaming_distribution:
description: >
Describes the streaming information for the distribution. Requires
I(distribution_id) or I(domain_name_alias) to be specified.
returned: only if I(streaming_distribution) is true
type: dict
streaming_distribution_config:
description: >
Describes the streaming configuration information for the distribution.
Requires I(distribution_id) or I(domain_name_alias) to be specified.
returned: only if I(streaming_distribution_config) is true
type: dict
summary:
description: Gives a summary of distributions, streaming distributions and origin access identities.
returned: as default or if summary is true
type: dict
'''
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn, HAS_BOTO3
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
from ansible.module_utils.basic import AnsibleModule
from functools import partial
import traceback
try:
import botocore
except ImportError:
pass # will be caught by imported HAS_BOTO3
class CloudFrontServiceManager:
"""Handles CloudFront Services"""
def __init__(self, module):
self.module = module
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.client = boto3_conn(module, conn_type='client',
resource='cloudfront', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoRegionError:
self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION "
"environment variable or in boto configuration file")
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Can't establish connection - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_distribution(self, distribution_id):
try:
func = partial(self.client.get_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_distribution_config(self, distribution_id):
try:
func = partial(self.client.get_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_origin_access_identity(self, origin_access_identity_id):
try:
func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_origin_access_identity_config(self, origin_access_identity_id):
try:
func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_invalidation(self, distribution_id, invalidation_id):
try:
func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing invalidation - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_streaming_distribution(self, distribution_id):
try:
func = partial(self.client.get_streaming_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_streaming_distribution_config(self, distribution_id):
try:
func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def list_origin_access_identities(self):
try:
func = partial(self.client.list_cloud_front_origin_access_identities)
origin_access_identity_list = self.paginated_response(func, 'CloudFrontOriginAccessIdentityList')
if origin_access_identity_list['Quantity'] > 0:
return origin_access_identity_list['Items']
return {}
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error listing cloud front origin access identities - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def list_distributions(self, keyed=True):
try:
func = partial(self.client.list_distributions)
distribution_list = self.paginated_response(func, 'DistributionList')
if distribution_list['Quantity'] == 0:
return {}
else:
distribution_list = distribution_list['Items']
if not keyed:
return distribution_list
return self.keyed_list_helper(distribution_list)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error listing distributions - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def list_distributions_by_web_acl_id(self, web_acl_id):
try:
func = partial(self.client.list_distributions_by_web_acl_id, WebAclId=web_acl_id)
distribution_list = self.paginated_response(func, 'DistributionList')
if distribution_list['Quantity'] == 0:
return {}
else:
distribution_list = distribution_list['Items']
return self.keyed_list_helper(distribution_list)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error listing distributions by web acl id - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def list_invalidations(self, distribution_id):
try:
func = partial(self.client.list_invalidations, DistributionId=distribution_id)
invalidation_list = self.paginated_response(func, 'InvalidationList')
if invalidation_list['Quantity'] > 0:
return invalidation_list['Items']
return {}
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error listing invalidations - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def list_streaming_distributions(self, keyed=True):
try:
func = partial(self.client.list_streaming_distributions)
streaming_distribution_list = self.paginated_response(func, 'StreamingDistributionList')
if streaming_distribution_list['Quantity'] == 0:
return {}
else:
streaming_distribution_list = streaming_distribution_list['Items']
if not keyed:
return streaming_distribution_list
return self.keyed_list_helper(streaming_distribution_list)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error listing streaming distributions - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def summary(self):
summary_dict = {}
summary_dict.update(self.summary_get_distribution_list(False))
summary_dict.update(self.summary_get_distribution_list(True))
summary_dict.update(self.summary_get_origin_access_identity_list())
return summary_dict
def summary_get_origin_access_identity_list(self):
try:
origin_access_identity_list = {'origin_access_identities': []}
origin_access_identities = self.list_origin_access_identities()
for origin_access_identity in origin_access_identities:
oai_id = origin_access_identity['Id']
oai_full_response = self.get_origin_access_identity(oai_id)
oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
origin_access_identity_list['origin_access_identities'].append(oai_summary)
return origin_access_identity_list
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def summary_get_distribution_list(self, streaming=False):
try:
list_name = 'streaming_distributions' if streaming else 'distributions'
key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
distribution_list = {list_name: []}
distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
for dist in distributions:
temp_distribution = {}
for key_name in key_list:
temp_distribution[key_name] = dist[key_name]
temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
if not streaming:
temp_distribution['WebACLId'] = dist['WebACLId']
invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
if invalidation_ids:
temp_distribution['Invalidations'] = invalidation_ids
resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
distribution_list[list_name].append(temp_distribution)
return distribution_list
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except Exception as e:
self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
exception=traceback.format_exc())
def get_etag_from_distribution_id(self, distribution_id, streaming):
distribution = {}
if not streaming:
distribution = self.get_distribution(distribution_id)
else:
distribution = self.get_streaming_distribution(distribution_id)
return distribution['ETag']
def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
try:
invalidation_ids = []
invalidations = self.list_invalidations(distribution_id)
for invalidation in invalidations:
invalidation_ids.append(invalidation['Id'])
return invalidation_ids
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error getting list of invalidation ids - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_distribution_id_from_domain_name(self, domain_name):
try:
distribution_id = ""
distributions = self.list_distributions(False)
distributions += self.list_streaming_distributions(False)
for dist in distributions:
if 'Items' in dist['Aliases']:
for alias in dist['Aliases']['Items']:
if str(alias).lower() == domain_name.lower():
distribution_id = dist['Id']
break
return distribution_id
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error getting distribution id from domain name - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def get_aliases_from_distribution_id(self, distribution_id):
aliases = []
try:
distributions = self.list_distributions(False)
for dist in distributions:
if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
for alias in dist['Aliases']['Items']:
aliases.append(alias)
break
return aliases
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error getting list of aliases from distribution_id - " + str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def paginated_response(self, func, result_key=""):
'''
Returns expanded response for paginated operations.
The 'result_key' is used to define the concatenated results that are combined from each paginated response.
'''
args = dict()
results = dict()
loop = True
while loop:
response = func(**args)
if result_key == "":
result = response
result.pop('ResponseMetadata', None)
else:
result = response.get(result_key)
results.update(result)
args['Marker'] = response.get('NextMarker')
for key in response.keys():
if key.endswith('List'):
args['Marker'] = response[key].get('NextMarker')
break
loop = args['Marker'] is not None
return results
def keyed_list_helper(self, list_to_key):
keyed_list = dict()
for item in list_to_key:
distribution_id = item['Id']
if 'Items' in item['Aliases']:
aliases = item['Aliases']['Items']
for alias in aliases:
keyed_list.update({alias: item})
keyed_list.update({distribution_id: item})
return keyed_list
def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
facts[distribution_id].update(details)
for alias in aliases:
facts[alias].update(details)
return facts
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
distribution_id=dict(required=False, type='str'),
invalidation_id=dict(required=False, type='str'),
origin_access_identity_id=dict(required=False, type='str'),
domain_name_alias=dict(required=False, type='str'),
all_lists=dict(required=False, default=False, type='bool'),
distribution=dict(required=False, default=False, type='bool'),
distribution_config=dict(required=False, default=False, type='bool'),
origin_access_identity=dict(required=False, default=False, type='bool'),
origin_access_identity_config=dict(required=False, default=False, type='bool'),
invalidation=dict(required=False, default=False, type='bool'),
streaming_distribution=dict(required=False, default=False, type='bool'),
streaming_distribution_config=dict(required=False, default=False, type='bool'),
list_origin_access_identities=dict(required=False, default=False, type='bool'),
list_distributions=dict(required=False, default=False, type='bool'),
list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
list_invalidations=dict(required=False, default=False, type='bool'),
list_streaming_distributions=dict(required=False, default=False, type='bool'),
summary=dict(required=False, default=False, type='bool')
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
service_mgr = CloudFrontServiceManager(module)
distribution_id = module.params.get('distribution_id')
invalidation_id = module.params.get('invalidation_id')
origin_access_identity_id = module.params.get('origin_access_identity_id')
web_acl_id = module.params.get('web_acl_id')
domain_name_alias = module.params.get('domain_name_alias')
all_lists = module.params.get('all_lists')
distribution = module.params.get('distribution')
distribution_config = module.params.get('distribution_config')
origin_access_identity = module.params.get('origin_access_identity')
origin_access_identity_config = module.params.get('origin_access_identity_config')
invalidation = module.params.get('invalidation')
streaming_distribution = module.params.get('streaming_distribution')
streaming_distribution_config = module.params.get('streaming_distribution_config')
list_origin_access_identities = module.params.get('list_origin_access_identities')
list_distributions = module.params.get('list_distributions')
list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
list_invalidations = module.params.get('list_invalidations')
list_streaming_distributions = module.params.get('list_streaming_distributions')
summary = module.params.get('summary')
aliases = []
result = {'cloudfront': {}}
facts = {}
require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
streaming_distribution_config or list_invalidations)
# set default to summary if no option specified
summary = summary or not (distribution or distribution_config or origin_access_identity or
origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
list_streaming_distributions or list_distributions)
# validations
if require_distribution_id and distribution_id is None and domain_name_alias is None:
module.fail_json(msg='Error distribution_id or domain_name_alias has not been specified.')
if (invalidation and invalidation_id is None):
module.fail_json(msg='Error invalidation_id has not been specified.')
if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
module.fail_json(msg='Error origin_access_identity_id has not been specified.')
if list_distributions_by_web_acl_id and web_acl_id is None:
module.fail_json(msg='Error web_acl_id has not been specified.')
# get distribution id from domain name alias
if require_distribution_id and distribution_id is None:
distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
if not distribution_id:
module.fail_json(msg='Error unable to source a distribution id from domain_name_alias')
# set appropriate cloudfront id
if distribution_id and not list_invalidations:
facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
facts.update({alias: {}})
if invalidation_id:
facts.update({invalidation_id: {}})
elif distribution_id and list_invalidations:
facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
facts.update({alias: {}})
elif origin_access_identity_id:
facts = {origin_access_identity_id: {}}
elif web_acl_id:
facts = {web_acl_id: {}}
# get details based on options
if distribution:
facts_to_set = service_mgr.get_distribution(distribution_id)
if distribution_config:
facts_to_set = service_mgr.get_distribution_config(distribution_id)
if origin_access_identity:
facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
if origin_access_identity_config:
facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
if invalidation:
facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
facts[invalidation_id].update(facts_to_set)
if streaming_distribution:
facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
if streaming_distribution_config:
facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
if list_invalidations:
facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
if 'facts_to_set' in vars():
facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)
# get list based on options
if all_lists or list_origin_access_identities:
facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
if all_lists or list_distributions:
facts['distributions'] = service_mgr.list_distributions()
if all_lists or list_streaming_distributions:
facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
if list_distributions_by_web_acl_id:
facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
if list_invalidations:
facts['invalidations'] = service_mgr.list_invalidations(distribution_id)
# default summary option
if summary:
facts['summary'] = service_mgr.summary()
result['changed'] = False
result['cloudfront'].update(facts)
module.exit_json(msg="Retrieved cloudfront facts.", ansible_facts=result)
if __name__ == '__main__':
main()
|
marklee77/muttutils
|
refs/heads/master
|
save_addrs.py
|
1
|
#!/usr/bin/env python
# save_addrs.py
# Copyright (C) 2013 Mark Lee Stillwell
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
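# Reads an email message from stdin, extracts the To/Cc/Bcc addresses and
# inserts or updates them in the sqlite address book used for mutt lookups.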
def format_name(namein):
from email.header import decode_header
import re
if not namein:
return None
nameout = None
try:
nameout = " ".join(unicode(s, c) if c else unicode(s)
for s, c in decode_header(namein))
except UnicodeEncodeError:
nameout = unicode(namein)
m = re.search(r'"([^()\[\]]*).*"', nameout)
if m:
nameout = m.group(1)
nameout = re.sub(r'\s*([^,]*[^,\s]),\s*(.*[^\s])\s*', r'\2 \1', nameout)
m = re.search(r'".*\((.+)\).*"', namein)
if m:
nameout += " (" + m.group(1) + ")"
m = re.search(r'".*\[(.+)\].*"', namein)
if m:
nameout += " [" + m.group(1) + "]"
return nameout
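# Illustrative example: format_name('"Stillwell, Mark (ACME)"') normalises the
# display name to 'Mark Stillwell (ACME)' -- "Last, First" is swapped and any
# parenthesised or bracketed annotation from the original is re-appended.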
def main(argv=None):
from argparse import ArgumentParser
from email.parser import Parser
from email.utils import getaddresses
from os.path import expanduser, isfile
from sqlite3 import connect
import sqlite3
from sys import stdin
argparser = ArgumentParser(description="Save addresses from a message on stdin to an sqlite address book.")
argparser.add_argument('-b', '--dbfile', default='~/.mutt/abook.db',
help='database file')
args = argparser.parse_args()
database_file = expanduser(args.dbfile)
if not isfile(database_file):
raise SystemExit("ERROR: no such file '%s'!" % args.dbfile)
msgparser = Parser()
msg = msgparser.parse(stdin, True)
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
bccs = msg.get_all('bcc', [])
try:
con = connect(database_file)
cur = con.cursor()
for prename, email in getaddresses(tos + ccs + bccs):
name = format_name(prename)
cur.execute("select name from addresses where email like '" + email + "';")
row = cur.fetchone();
if not row:
if name and len(name) > 0:
cur.execute("insert into addresses (name, email, created, modified) values ('" + name + "', '" + email + "', datetime('now'), datetime('now'));")
else:
cur.execute("insert into addresses (email, created, modified) values ('" + email + "', datetime('now'), datetime('now'));")
elif name and len(name) > 0:
cur.execute("update addresses set name = '" + name + "', modified = datetime('now') where email like '" + email +"';")
con.commit()
except sqlite3.Error, e:
raise SystemExit("ERROR: %s" % e.args[0])
finally:
if con:
con.close()
if __name__ == "__main__":
main()
|
shuggiefisher/crowdstock
|
refs/heads/master
|
django/utils/unittest/collector.py
|
572
|
import os
import sys
from django.utils.unittest.loader import defaultTestLoader
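# This mirrors the unittest2 'collector' helper, typically referenced from
# setup.py as a test_suite entry point: it discovers tests relative to the
# directory of the __main__ module (usually setup.py) with the bundled loader.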
def collector():
# import __main__ triggers code re-execution
__main__ = sys.modules['__main__']
setupDir = os.path.abspath(os.path.dirname(__main__.__file__))
return defaultTestLoader.discover(setupDir)
|
dstufft/jinja2
|
refs/heads/master
|
jinja2/compiler.py
|
11
|
# -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, text_type, string_types, \
iteritems, NativeStringIO, imap
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
# does if 0: dummy(x) get us x into the scope?
def unoptimize_before_dead_code():
x = 42
def f():
if 0: dummy(x)
return f
# The getattr is necessary for pypy which does not set this attribute if
# no closure is on the function
unoptimize_before_dead_code = bool(
getattr(unoptimize_before_dead_code(), '__closure__', None))
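# i.e. True when the interpreter still builds a closure cell for `x` despite
# the dead `if 0:` branch; unoptimize_scope() uses this flag to decide whether
# a bare dummy() call or an `if 0: dummy(...)` guard is needed to keep names alive.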
def generate(node, environment, name, filename, stream=None,
defer_init=False):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = CodeGenerator(environment, name, filename, stream, defer_init)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
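# Sketch of typical use (names are illustrative): given an Environment `env`,
# generate(env.parse(source), env, '<template>', '<template>') returns the
# Python source of the template's render module as a string.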
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if isinstance(value, (bool, int, float, complex, range_type,
Markup) + string_types):
return True
if isinstance(value, (tuple, list, set, frozenset)):
for item in value:
if not has_safe_repr(item):
return False
return True
elif isinstance(value, dict):
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class Identifiers(object):
"""Tracks the status of identifiers in frames."""
def __init__(self):
# variables that are known to be declared (probably from outer
# frames or because they are special for the frame)
self.declared = set()
# undeclared variables from outer scopes
self.outer_undeclared = set()
# names that are accessed without being explicitly declared by
# this one or any of the outer scopes. Names can appear both in
# declared and undeclared.
self.undeclared = set()
# names that are declared locally
self.declared_locally = set()
# names that are declared by parameters
self.declared_parameter = set()
def add_special(self, name):
"""Register a special name like `loop`."""
self.undeclared.discard(name)
self.declared.add(name)
def is_declared(self, name):
"""Check if a name is declared in this or an outer scope."""
if name in self.declared_locally or name in self.declared_parameter:
return True
return name in self.declared
def copy(self):
return deepcopy(self)
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.identifiers = Identifiers()
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# a set of actually assigned names
self.assigned_names = set()
# the parent of this frame
self.parent = parent
if parent is not None:
self.identifiers.declared.update(
parent.identifiers.declared |
parent.identifiers.declared_parameter |
parent.assigned_names
)
self.identifiers.outer_undeclared.update(
parent.identifiers.undeclared -
self.identifiers.declared
)
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
return rv
def inspect(self, nodes):
"""Walk the node and check for identifiers. If the scope is hard (eg:
enforce on a python level) overrides from outer scopes are tracked
differently.
"""
visitor = FrameIdentifierVisitor(self.identifiers)
for node in nodes:
visitor.visit(node)
def find_shadowed(self, extra=()):
"""Find all the shadowed names. extra is an iterable of variables
that may be defined with `add_special` which may occur scoped.
"""
i = self.identifiers
return (i.declared | i.outer_undeclared) & \
(i.declared_locally | i.declared_parameter) | \
set(x for x in extra if i.is_declared(x))
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class FrameIdentifierVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, identifiers):
self.identifiers = identifiers
def visit_Name(self, node):
"""All assignments to names go through this function."""
if node.ctx == 'store':
self.identifiers.declared_locally.add(node.name)
elif node.ctx == 'param':
self.identifiers.declared_parameter.add(node.name)
elif node.ctx == 'load' and not \
self.identifiers.is_declared(node.name):
self.identifiers.undeclared.add(node.name)
def visit_If(self, node):
self.visit(node.test)
real_identifiers = self.identifiers
old_names = real_identifiers.declared_locally | \
real_identifiers.declared_parameter
def inner_visit(nodes):
if not nodes:
return set()
self.identifiers = real_identifiers.copy()
for subnode in nodes:
self.visit(subnode)
rv = self.identifiers.declared_locally - old_names
# we have to remember the undeclared variables of this branch
# because we will have to pull them.
real_identifiers.undeclared.update(self.identifiers.undeclared)
self.identifiers = real_identifiers
return rv
body = inner_visit(node.body)
else_ = inner_visit(node.else_ or ())
# the differences between the two branches are also pulled as
# undeclared variables
real_identifiers.undeclared.update(body.symmetric_difference(else_) -
real_identifiers.declared)
# remember those that are declared.
real_identifiers.declared_locally.update(body | else_)
def visit_Macro(self, node):
self.identifiers.declared_locally.add(node.name)
def visit_Import(self, node):
self.generic_visit(node)
self.identifiers.declared_locally.add(node.target)
def visit_FromImport(self, node):
self.generic_visit(node)
for name in node.names:
if isinstance(name, tuple):
self.identifiers.declared_locally.add(name[1])
else:
self.identifiers.declared_locally.add(name)
def visit_Assign(self, node):
"""Visit assignments in the correct order."""
self.visit(node.node)
self.visit(node.target)
def visit_For(self, node):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter)
def visit_CallBlock(self, node):
self.visit(node.call)
def visit_FilterBlock(self, node):
self.visit(node.filter)
def visit_AssignBlock(self, node):
"""Stop visiting at block assigns."""
def visit_Scope(self, node):
"""Stop visiting at scopes."""
def visit_Block(self, node):
"""Stop visiting at blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame):
"""Return the buffer contents of the frame."""
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
else:
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically
unless the force_generator parameter is set to False.
"""
if frame.buffer is None:
self.writeline('if 0: yield None')
else:
self.writeline('pass')
try:
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
error could occur. The extra keyword arguments should be given
as a python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_locals(self, frame):
"""Pull all the references identifiers into the local scope."""
for name in frame.identifiers.undeclared:
self.writeline('l_%s = context.resolve(%r)' % (name, name))
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def unoptimize_scope(self, frame):
"""Disable Python optimizations for the frame."""
# XXX: this is not that nice but it has no real overhead. It
# mainly works because python finds the locals before dead code
# is removed. If that breaks we have to add a dummy function
# that just accepts the arguments and does nothing.
if frame.identifiers.declared:
self.writeline('%sdummy(%s)' % (
unoptimize_before_dead_code and 'if 0: ' or '',
', '.join('l_' + name for name in frame.identifiers.declared)
))
def push_scope(self, frame, extra_vars=()):
"""This function returns all the shadowed variables in a dict
in the form name: alias and will write the required assignments
into the current scope. No indentation takes place.
This also predefines locally declared variables from the loop
body because under some circumstances it may be the case that
`extra_vars` is passed to `Frame.find_shadowed`.
"""
aliases = {}
for name in frame.find_shadowed(extra_vars):
aliases[name] = ident = self.temporary_identifier()
self.writeline('%s = l_%s' % (ident, name))
to_declare = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_declare.add('l_' + name)
if to_declare:
self.writeline(' = '.join(to_declare) + ' = missing')
return aliases
def pop_scope(self, aliases, frame):
"""Restore all aliases and delete unused variables."""
for name, alias in iteritems(aliases):
self.writeline('l_%s = %s' % (name, alias))
to_delete = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_delete.add('l_' + name)
if to_delete:
# we cannot use the del statement here because enclosed
# scopes can trigger a SyntaxError:
# a = 42; b = lambda: a; del a
self.writeline(' = '.join(to_delete) + ' = missing')
def function_scoping(self, node, frame, children=None,
find_special=True):
"""In Jinja a few statements require the help of anonymous
functions. Those are currently macros and call blocks and in
the future also recursive loops. As there is currently
technical limitation that doesn't allow reading and writing a
variable in a scope where the initial value is coming from an
outer scope, this function tries to fall back with a common
error message. Additionally the frame passed is modified so
that the arguments are collected and callers are looked up.
This will return the modified frame.
"""
# we have to iterate twice over it, make sure that works
if children is None:
children = node.iter_child_nodes()
children = list(children)
func_frame = frame.inner()
func_frame.inspect(children)
# variables that are undeclared (accessed before declaration) and
# declared locally *and* part of an outside scope raise a template
# assertion error. Reason: we can't generate reasonable code from
# it without aliasing all the variables.
# this could be fixed in Python 3 where we have the nonlocal
# keyword or if we switch to bytecode generation
overridden_closure_vars = (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared &
(func_frame.identifiers.declared_locally |
func_frame.identifiers.declared_parameter)
)
if overridden_closure_vars:
self.fail('It\'s not possible to set and access variables '
'derived from an outer scope! (affects: %s)' %
', '.join(sorted(overridden_closure_vars)), node.lineno)
# remove variables from a closure from the frame's undeclared
# identifiers.
func_frame.identifiers.undeclared -= (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared
)
# no special variables for this scope, abort early
if not find_special:
return func_frame
func_frame.accesses_kwargs = False
func_frame.accesses_varargs = False
func_frame.accesses_caller = False
func_frame.arguments = args = ['l_' + x.name for x in node.args]
undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
func_frame.accesses_caller = True
func_frame.identifiers.add_special('caller')
args.append('l_caller')
if 'kwargs' in undeclared:
func_frame.accesses_kwargs = True
func_frame.identifiers.add_special('kwargs')
args.append('l_kwargs')
if 'varargs' in undeclared:
func_frame.accesses_varargs = True
func_frame.identifiers.add_special('varargs')
args.append('l_varargs')
return func_frame
def macro_body(self, node, frame, children=None):
"""Dump the function def of a macro or call block."""
frame = self.function_scoping(node, frame, children)
# macros are delayed, they never require output checks
frame.require_output_check = False
args = frame.arguments
# XXX: this is an ugly fix for the loop nesting bug
# (tests.test_old_bugs.test_loop_call_bug). This works around
# an identifier nesting problem we have in general. It's just more
# likely to happen in loops which is why we work around it. The
# real solution would be "nonlocal" all the identifiers that are
# leaking into a new python frame and might be used both unassigned
# and assigned.
if 'loop' in frame.identifiers.declared:
args = args + ['l_loop=l_loop']
self.writeline('def macro(%s):' % ', '.join(args), node)
self.indent()
self.buffer(frame)
self.pull_locals(frame)
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame)
self.outdent()
return frame
def macro_def(self, node, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in node.args)
name = getattr(node, 'name', None)
if len(node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), (' %
(name, arg_tuple))
for arg in node.defaults:
self.visit(arg, frame)
self.write(', ')
self.write('), %r, %r, %r)' % (
bool(frame.accesses_kwargs),
bool(frame.accesses_varargs),
bool(frame.accesses_caller)
))
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import division')
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if not unoptimize_before_dead_code:
self.writeline('dummy = lambda *x: None')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('def root(context%s):' % envenv, extra=1)
# process the root
frame = Frame(eval_ctx)
frame.inspect(node.body)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
self.indent()
if have_extends:
self.writeline('parent_template = None')
if 'self' in find_undeclared(node.body, ('self',)):
frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
self.pull_locals(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('for event in parent_template.'
'root_render_func(context):')
self.indent()
self.writeline('yield event')
self.outdent(2 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
block_frame = Frame(eval_ctx)
block_frame.inspect(block.body)
block_frame.block = name
self.writeline('def block_%s(context%s):' % (name, envenv),
block, 1)
self.indent()
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
block_frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
if 'super' in undeclared:
block_frame.identifiers.add_special('super')
self.writeline('l_super = context.super(%r, '
'block_%s)' % (name, name))
self.pull_locals(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 1
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and 'context.derived(locals())' or 'context'
self.writeline('for event in context.blocks[%r][0](%s):' % (
node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
# time too, but it is better not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.with_context:
self.unoptimize_scope(frame)
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
if node.with_context:
self.writeline('for event in template.root_render_func('
'template.new_context(context.parent, True, '
'locals())):')
else:
self.writeline('for event in template.module._body_stream:')
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
if node.with_context:
self.unoptimize_scope(frame)
self.writeline('l_%s = ' % node.target, node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True, locals())')
else:
self.write('module')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
frame.assigned_names.add(node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True)')
else:
self.write('module')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('l_%s = getattr(included_template, '
'%r, missing)' % (alias, name))
self.writeline('if l_%s is missing:' % alias)
self.indent()
self.writeline('l_%s = environment.undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(alias, 'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
frame.assigned_names.add(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: l_%s' % (name, name) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
# when calculating the nodes for the inner frame we have to exclude
# the iterator contents from it
children = node.iter_child_nodes(exclude=('iter',))
if node.recursive:
loop_frame = self.function_scoping(node, frame, children,
find_special=False)
else:
loop_frame = frame.inner()
loop_frame.inspect(children)
# try to figure out if we have an extended loop. An extended loop
# is necessary if the loop is in recursive mode if the special loop
# variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
# if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if not node.recursive:
aliases = self.push_scope(loop_frame, ('loop',))
# otherwise we set up a buffer and add a function def
else:
self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
self.indent()
self.buffer(loop_frame)
aliases = {}
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline('l_loop = missing')
loop_frame.identifiers.add_special('loop')
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
self.pull_locals(loop_frame)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
# Create a fake parent loop if the else or test section of a
# loop is accessing the special loop variable and no parent loop
# exists.
if 'loop' not in aliases and 'loop' in find_undeclared(
node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
self.writeline("l_loop = environment.undefined(%r, name='loop')" %
("'loop' is undefined. the filter section of a loop as well "
"as the else block don't have access to the special 'loop'"
" variable of the current loop. Because there is no parent "
"loop it's undefined. Happened in loop on %s" %
self.position(node)))
self.writeline('for ', node)
self.visit(node.target, loop_frame)
self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
# if we have an extended loop and a node test, we filter in the
# "outer frame".
if extended_loop and node.test is not None:
self.write('(')
self.visit(node.target, loop_frame)
self.write(' for ')
self.visit(node.target, loop_frame)
self.write(' in ')
if node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
self.write(' if (')
test_frame = loop_frame.copy()
self.visit(node.test, test_frame)
self.write('))')
elif node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
if node.recursive:
self.write(', loop_render_func, depth):')
else:
self.write(extended_loop and '):' or ':')
# tests in not extended loops become a continue
if not extended_loop and node.test is not None:
self.indent()
self.writeline('if not ')
self.visit(node.test, loop_frame)
self.write(':')
self.indent()
self.writeline('continue')
self.outdent(2)
self.indent()
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.blockvisit(node.else_, loop_frame)
self.outdent()
# reset the aliases if there are any.
if not node.recursive:
self.pop_scope(aliases, loop_frame)
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
self.write('loop(')
self.visit(node.iter, frame)
self.write(', loop)')
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('l_%s = ' % node.name)
self.macro_def(node, macro_frame)
frame.assigned_names.add(node.name)
def visit_CallBlock(self, node, frame):
children = node.iter_child_nodes(exclude=('call',))
call_frame = self.macro_body(node, frame, children)
self.writeline('caller = ')
self.macro_def(node, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, call_frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(filter_frame)
self.pull_locals(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.pop_scope(aliases, filter_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
if self.environment.finalize:
finalize = lambda x: text_type(self.environment.finalize(x))
else:
finalize = text_type
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
body = []
for child in node.nodes:
try:
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
# the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ', ')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(', ')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
idx = -1
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
def make_assignment_frame(self, frame):
# toplevel assignments however go into the local namespace and
# the current template's context. We create a copy of the frame
# here and add a set so that the Name visitor can add the assigned
# names here.
if not frame.toplevel:
return frame
assignment_frame = frame.copy()
assignment_frame.toplevel_assignments = set()
return assignment_frame
def export_assigned_vars(self, frame, assignment_frame):
if not frame.toplevel:
return
public_names = [x for x in assignment_frame.toplevel_assignments
if not x.startswith('_')]
if len(assignment_frame.toplevel_assignments) == 1:
name = next(iter(assignment_frame.toplevel_assignments))
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(assignment_frame.toplevel_assignments):
if idx:
self.write(', ')
self.write('%r: l_%s' % (name, name))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names)))
def visit_Assign(self, node, frame):
self.newline(node)
assignment_frame = self.make_assignment_frame(frame)
self.visit(node.target, assignment_frame)
self.write(' = ')
self.visit(node.node, frame)
self.export_assigned_vars(frame, assignment_frame)
def visit_AssignBlock(self, node, frame):
block_frame = frame.inner()
block_frame.inspect(node.body)
aliases = self.push_scope(block_frame)
self.pull_locals(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.pop_scope(aliases, block_frame)
assignment_frame = self.make_assignment_frame(frame)
self.newline(node)
self.visit(node.target, assignment_frame)
self.write(' = concat(%s)' % block_frame.buffer)
self.export_assigned_vars(frame, assignment_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
frame.toplevel_assignments.add(node.name)
self.write('l_' + node.name)
frame.assigned_names.add(node.name)
def visit_Const(self, node, frame):
val = node.value
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
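# binop()/uaop() are factories: each assignment below produces a visitor
# method that either emits a plain Python operator expression or, in a
# sandboxed environment with the operator intercepted, a call into
# environment.call_binop()/call_unop().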
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
def visit_Filter(self, node, frame):
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('environment.undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(scope_frame)
self.pull_locals(scope_frame)
self.blockvisit(node.body, scope_frame)
self.pop_scope(aliases, scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
safed_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(safed_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
|
bhargavvader/gensim
|
refs/heads/develop
|
gensim/test/simspeed.py
|
14
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s CORPUS_DENSE.mm CORPUS_SPARSE.mm [NUMDOCS]
Run speed test of similarity queries. Only use the first NUMDOCS documents of \
each corpus for testing (or use all if no NUMDOCS is given).
The two sample corpora can be downloaded from http://nlp.fi.muni.cz/projekty/gensim/wikismall.tgz
Example: ./simspeed.py wikismall.dense.mm wikismall.sparse.mm 5000
"""
import logging
import sys
import itertools
import os
import math
from time import time
import numpy as np
import scipy.sparse
import gensim
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.info("running %s" % " ".join(sys.argv))
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
corpus_dense = gensim.corpora.MmCorpus(sys.argv[1])
corpus_sparse = gensim.corpora.MmCorpus(sys.argv[2])
NUMTERMS = corpus_sparse.num_terms
if len(sys.argv) > 3:
NUMDOCS = int(sys.argv[3])
corpus_dense = list(itertools.islice(corpus_dense, NUMDOCS))
corpus_sparse = list(itertools.islice(corpus_sparse, NUMDOCS))
# create the query index to be tested (one for dense input, one for sparse)
index_dense = gensim.similarities.MatrixSimilarity(corpus_dense)
index_sparse = gensim.similarities.SparseMatrixSimilarity(corpus_sparse, num_terms=NUMTERMS)
density = 100.0 * index_sparse.index.nnz / (index_sparse.index.shape[0] * index_sparse.index.shape[1])
# Difference between test #1 and test #3 is that the query in #1 is a gensim iterable
# corpus, while in #3, the index is used directly (np arrays). So #1 is slower,
# because it needs to convert sparse vecs to np arrays and normalize them to
# unit length (extra work), which #3 avoids.
query = list(itertools.islice(corpus_dense, 1000))
logging.info("test 1 (dense): dense corpus of %i docs vs. index (%i documents, %i dense features)" %
(len(query), len(index_dense), index_dense.num_features))
for chunksize in [1, 4, 8, 16, 64, 128, 256, 512, 1024]:
start = time()
if chunksize > 1:
sims = []
for chunk in gensim.utils.chunkize_serial(query, chunksize):
sim = index_dense[chunk]
sims.extend(sim)
else:
sims = [index_dense[vec] for vec in query]
assert len(sims) == len(query) # make sure we have one result for each query document
taken = time() - start
queries = math.ceil(1.0 * len(query) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(query) / taken, queries / taken))
# Same comment as for test #1 but vs. test #4.
query = list(itertools.islice(corpus_sparse, 1000))
logging.info("test 2 (sparse): sparse corpus of %i docs vs. sparse index (%i documents, %i features, %.2f%% density)" %
(len(query), len(corpus_sparse), index_sparse.index.shape[1], density))
for chunksize in [1, 5, 10, 100, 500, 1000]:
start = time()
if chunksize > 1:
sims = []
for chunk in gensim.utils.chunkize_serial(query, chunksize):
sim = index_sparse[chunk]
sims.extend(sim)
else:
sims = [index_sparse[vec] for vec in query]
assert len(sims) == len(query) # make sure we have one result for each query document
taken = time() - start
queries = math.ceil(1.0 * len(query) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(query) / taken, queries / taken))
logging.info("test 3 (dense): similarity of all vs. all (%i documents, %i dense features)" %
(len(corpus_dense), index_dense.num_features))
for chunksize in [0, 1, 4, 8, 16, 64, 128, 256, 512, 1024]:
index_dense.chunksize = chunksize
start = time()
# `sims` stores the entire N x N sim matrix in memory!
# this is not necessary, but i added it to test the accuracy of the result
# (=report mean diff below)
sims = [sim for sim in index_dense]
taken = time() - start
sims = np.asarray(sims)
if chunksize == 0:
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s)" % (chunksize, taken, len(corpus_dense) / taken))
unchunksizeed = sims
else:
queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
diff = np.mean(np.abs(unchunksizeed - sims))
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s), meandiff=%.3e" %
(chunksize, taken, len(corpus_dense) / taken, queries / taken, diff))
del sims
index_dense.num_best = 10
logging.info("test 4 (dense): as above, but only ask for the top-10 most similar for each document")
for chunksize in [0, 1, 4, 8, 16, 64, 128, 256, 512, 1024]:
index_dense.chunksize = chunksize
start = time()
sims = [sim for sim in index_dense]
taken = time() - start
if chunksize == 0:
queries = len(corpus_dense)
else:
queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(corpus_dense) / taken, queries / taken))
index_dense.num_best = None
logging.info("test 5 (sparse): similarity of all vs. all (%i documents, %i features, %.2f%% density)" %
(len(corpus_sparse), index_sparse.index.shape[1], density))
for chunksize in [0, 5, 10, 100, 500, 1000, 5000]:
index_sparse.chunksize = chunksize
start = time()
sims = [sim for sim in index_sparse]
taken = time() - start
sims = np.asarray(sims)
if chunksize == 0:
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s)" % (chunksize, taken, len(corpus_sparse) / taken))
unchunksizeed = sims
else:
queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
diff = np.mean(np.abs(unchunksizeed - sims))
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s), meandiff=%.3e" %
(chunksize, taken, len(corpus_sparse) / taken, queries / taken, diff))
del sims
index_sparse.num_best = 10
logging.info("test 6 (sparse): as above, but only ask for the top-10 most similar for each document")
for chunksize in [0, 5, 10, 100, 500, 1000, 5000]:
index_sparse.chunksize = chunksize
start = time()
sims = [sim for sim in index_sparse]
taken = time() - start
if chunksize == 0:
queries = len(corpus_sparse)
else:
queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(corpus_sparse) / taken, queries / taken))
index_sparse.num_best = None
logging.info("finished running %s" % program)
|
heyandie/django-qsstats-magic
|
refs/heads/master
|
test_settings/postgres.py
|
1
|
INSTALLED_APPS = (
'qsstats',
'django.contrib.auth',
'django.contrib.contenttypes'
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'qsstats_test',
'USER': 'qsstats_test',
'PASSWORD': 'qsstats_test',
}
}
SECRET_KEY = 'foo'
|
persandstrom/home-assistant
|
refs/heads/master
|
homeassistant/components/switch/ihc.py
|
3
|
"""IHC switch platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.ihc/
"""
import voluptuous as vol
from homeassistant.components.ihc import (
validate_name, IHC_DATA, IHC_CONTROLLER, IHC_INFO)
from homeassistant.components.ihc.ihcdevice import IHCDevice
from homeassistant.components.switch import SwitchDevice, PLATFORM_SCHEMA
from homeassistant.const import CONF_ID, CONF_NAME, CONF_SWITCHES
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['ihc']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SWITCHES, default=[]):
vol.All(cv.ensure_list, [
vol.All({
vol.Required(CONF_ID): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
}, validate_name)
])
})
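# Illustrative configuration.yaml entry matching the schema above; the id and
# name values are made-up examples, not part of the platform itself:
#
#   switch:
#     - platform: ihc
#       switches:
#         - id: 12345
#           name: kitchen_switch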
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IHC switch platform."""
ihc_controller = hass.data[IHC_DATA][IHC_CONTROLLER]
info = hass.data[IHC_DATA][IHC_INFO]
devices = []
if discovery_info:
for name, device in discovery_info.items():
ihc_id = device['ihc_id']
product = device['product']
switch = IHCSwitch(ihc_controller, name, ihc_id, info, product)
devices.append(switch)
else:
switches = config[CONF_SWITCHES]
for switch in switches:
ihc_id = switch[CONF_ID]
name = switch[CONF_NAME]
sensor = IHCSwitch(ihc_controller, name, ihc_id, info)
devices.append(sensor)
add_entities(devices)
class IHCSwitch(IHCDevice, SwitchDevice):
"""IHC Switch."""
def __init__(self, ihc_controller, name: str, ihc_id: int,
info: bool, product=None) -> None:
"""Initialize the IHC switch."""
super().__init__(ihc_controller, name, ihc_id, product)
self._state = False
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.ihc_controller.set_runtime_value_bool(self.ihc_id, True)
def turn_off(self, **kwargs):
"""Turn the device off."""
self.ihc_controller.set_runtime_value_bool(self.ihc_id, False)
def on_ihc_change(self, ihc_id, value):
"""Handle IHC resource change."""
self._state = value
self.schedule_update_ha_state()
|
ESSS/numpy
|
refs/heads/master
|
numpy/distutils/misc_util.py
|
14
|
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import imp
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import distutils
from distutils.errors import DistutilsError
try:
from threading import local as tlocal
except ImportError:
from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
try:
set
except NameError:
from sets import Set as set
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib(object):
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
If the command did not receive a setting, the environment variable
NPY_NUM_BUILD_JOBS is checked; if that is also unset, 1 is returned.
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", 1))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
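# Illustrative use only (the command and value below are examples): the job
# count can come from `python setup.py build --parallel 4` or from the
# NPY_NUM_BUILD_JOBS environment variable, e.g.:
#
#   njobs = get_num_build_jobs()  # -> 4 in the scenarios above, else 1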
def quote_args(args):
# don't use _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
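# Example sketch of the quoting behaviour (the paths are hypothetical):
#
#   quote_args(['-I', 'C:\\Program Files\\inc'])
#   # -> ['-I', '"C:\\Program Files\\inc"']  (only args containing spaces get quoted)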
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path.
"""
pd = os.path.abspath(parent_path)
apath = os.path.abspath(path)
if len(apath)<len(pd):
return path
if apath==pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
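# Example sketch (paths are hypothetical):
#
#   rel_path('/opt/pkg/src/module.c', '/opt/pkg')  # -> 'src/module.c'
#   rel_path('/other/place', '/opt/pkg')           # -> '/other/place' (not below parent_path)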
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
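# Example sketch of the accepted call styles (POSIX path separators assumed):
#
#   njoin('a', 'b/../c')        # -> 'a/c'
#   njoin(['a', 'b'], 'c.txt')  # -> 'a/b/c.txt'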
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
fid.close()
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
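# Example sketch (assumes '/' is os.sep):
#
#   minrelpath('pkg/./sub/../include')  # -> 'pkg/include'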
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = glob.glob(n)
p2 = glob.glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
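# Example sketch (the directory layout is hypothetical):
#
#   gpaths('src/*.c', local_path='mypkg')
#   # -> e.g. ['mypkg/src/a.c', 'mypkg/src/b.c'] when the pattern matches under
#   #    local_path; non-matching patterns are kept as-is by default.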
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
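# Example sketch (the path is only rewritten when running under Cygwin):
#
#   cyg2win32('/cygdrive/c/somedir')  # -> 'c:/somedir' on Cygwin, unchanged elsewhere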
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
lib = {'1300': 'msvcr70', # MSVC 7.0
'1310': 'msvcr71', # MSVC 7.1
'1400': 'msvcr80', # MSVC 8
'1500': 'msvcr90', # MSVC 9 (VS 2008)
'1600': 'msvcr100', # MSVC 10 (aka 2010)
}.get(msc_ver, None)
else:
lib = None
return lib
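# Example sketch: on a CPython built with Visual Studio 2008, sys.version
# contains 'MSC v.1500', so this returns 'msvcr90'; on non-MSVC builds it
# returns None.
#
#   msvc_runtime_library()  # -> 'msvcr90' on an 'MSC v.1500' build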
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
f = open(source, 'r')
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
f.close()
return modules
def is_string(s):
return isinstance(s, basestring)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
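# Example sketch (file names are hypothetical; a .f90 file lands in the module
# list only if it actually defines a Fortran module):
#
#   filter_sources(['core.c', 'wrap.cpp', 'legacy.f', 'modern.f90'])
#   # -> (['core.c'], ['wrap.cpp'], ['legacy.f'], ['modern.f90'])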
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
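# Example sketch: empty components are dropped before joining.
#
#   dot_join('numpy', '', 'distutils')  # -> 'numpy.distutils'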
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
fo_setup_py = open(setup_py, 'U')
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = imp.load_module('_'.join(n.split('.')),
fo_setup_py,
setup_py,
('.py', 'U', 1))
fo_setup_py.close()
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
# The Python 2 and Python 3 fix-up helpers here were identical (both read
# configuration.__code__), so a single check is sufficient.
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
if given, the subpackage path such as the subpackage is in
subpackage_path / subpackage_name. If None,the subpackage is
assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
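# Illustrative setup.py pattern using add_subpackage; the package names are
# placeholders, not part of numpy.distutils itself:
#
#   def configuration(parent_package='', top_path=None):
#       from numpy.distutils.misc_util import Configuration
#       config = Configuration('mypkg', parent_package, top_path)
#       config.add_subpackage('core')
#       return config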
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths:
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat::
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. *.txt -> parent/a.txt, parent/b.txt
#. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
#. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
car.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
'/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
of the define_macros list This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if 'depends' not in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compiler_args
* extra_f90_compiler_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refer to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['svnversion'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['hg identify --num'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
f = open(branch_fn)
revision0 = f.read().strip()
f.close()
branch_map = {}
for line in file(branch_cache_fn, 'r'):
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
revision = branch_map.get(branch0)
return revision
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
        __version__.py, <packagename>_version.py, version.py,
        __svn_version__.py, and __hg_version__.py for string variables
        version, __version\__, and <packagename>_version, until a version
        number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = (open(fn), fn, ('.py', 'U', 1))
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = imp.load_module('_'.join(n.split('.')),*info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
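    # Illustrative sketch (not part of the original numpy source): with a
    # hypothetical file <local_path>/version.py containing
    #
    #     version = '1.2.3'
    #
    # get_version() would return '1.2.3' and cache it on the instance, only
    # falling back to an SVN or Mercurial revision when no such file exists:
    #
    #     config = Configuration('mypkg', '', top_path='.')
    #     config.get_version()    # -> '1.2.3'
    #     config.version          # -> '1.2.3' (cached)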
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
        in a Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
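# Illustrative sketch (not part of the original numpy source): how
# Configuration.get_info() is typically combined with add_extension() inside a
# setup.py configuration() function.  'mypkg', '_spam' and 'spam.c' are
# placeholders for a real package, extension and source file.
def _example_configuration_with_get_info(parent_package='', top_path=None):
    config = Configuration('mypkg', parent_package, top_path)
    # Merge the build information of one or more system_info resources ...
    blas_info = config.get_info('blas_opt')
    # ... and hand it to an extension through the extra_info argument.
    config.add_extension('_spam', sources=['spam.c'], extra_info=blas_info)
    return config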
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory."""
# XXX: import here for bootstrapping reasons
import numpy
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
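# Illustrative sketch (not part of the original numpy source): querying the
# npy-pkg-config data of the npymath library that ships with numpy.  The
# returned LibraryInfo instance exposes the raw flag strings that get_info()
# below turns into a build_info dict.
def _example_get_pkg_info_usage(extra_dirs=None):
    pkg = get_pkg_info('npymath', dirs=extra_dirs)
    return pkg.cflags(), pkg.libs()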
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension extra_info argument is ANAL
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
__NUMPY_SETUP__ = False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
))
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
''')
f.close()
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
if sys.version[:3] >= '2.5':
def get_build_architecture():
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
else:
#copied from python 2.5.1 distutils/msvccompiler.py
def get_build_architecture():
"""Return the processor architecture.
Possible results are "Intel", "Itanium", or "AMD64".
"""
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return "Intel"
j = sys.version.find(")", i)
return sys.version[i+len(prefix):j]
|
BambooL/jeeves
|
refs/heads/master
|
demo/tests/simpleRule/wsgi.py
|
15
|
"""
WSGI config for jelf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__), 'jelf/'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
os.environ["DJANGO_SETTINGS_MODULE"] = "jelf.settings"
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
saurabh6790/test_final_med_app
|
refs/heads/master
|
accounts/doctype/budget_distribution/test_budget_distribution.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
test_records = [
[{
"doctype": "Budget Distribution",
"distribution_id": "_Test Distribution",
"fiscal_year": "_Test Fiscal Year 2013",
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "January",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "February",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "March",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "April",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "May",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "June",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "July",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "August",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "September",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "October",
"percentage_allocation": "8"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "November",
"percentage_allocation": "10"
}, {
"doctype": "Budget Distribution Detail",
"parentfield": "budget_distribution_details",
"month": "December",
"percentage_allocation": "10"
}]
]
|
kingvuplus/PKT-gui2
|
refs/heads/master
|
lib/python/Plugins/SystemPlugins/AnimationSetup/plugin.py
|
15
|
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.Sources.StaticText import StaticText
from Components.config import config, ConfigNumber, ConfigSelectionNumber, getConfigListEntry
from Plugins.Plugin import PluginDescriptor
from enigma import setAnimation_current, setAnimation_speed
# default = slide to left
g_default = {
"current": 0,
"speed" : 20,
}
g_max_speed = 30
g_animation_paused = False
g_orig_show = None
g_orig_doClose = None
config.misc.window_animation_default = ConfigNumber(default=g_default["current"])
config.misc.window_animation_speed = ConfigSelectionNumber(1, g_max_speed, 1, default=g_default["speed"])
class AnimationSetupConfig(ConfigListScreen, Screen):
skin= """
<screen position="center,center" size="600,140" title="Animation Settings">
<widget name="config" position="0,0" size="600,100" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/buttons/red.png" position="0,100" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,100" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,100" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,100" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,100" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,100" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#a08500" transparent="1" />
</screen>
"""
def __init__(self, session):
self.session = session
self.entrylist = []
Screen.__init__(self, session)
ConfigListScreen.__init__(self, self.entrylist)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions",], {
"ok" : self.keyGreen,
"green" : self.keyGreen,
"yellow" : self.keyYellow,
"red" : self.keyRed,
"cancel" : self.keyRed,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["key_yellow"] = StaticText(_("Default"))
self.makeConfigList()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_('Animation Setup'))
def keyGreen(self):
config.misc.window_animation_speed.save()
setAnimation_speed(int(config.misc.window_animation_speed.value))
self.close()
def keyRed(self):
config.misc.window_animation_speed.cancel()
self.close()
def keyYellow(self):
global g_default
config.misc.window_animation_speed.value = g_default["speed"]
self.makeConfigList()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def makeConfigList(self):
self.entrylist = []
entrySpeed = getConfigListEntry(_("Animation Speed"), config.misc.window_animation_speed)
self.entrylist.append(entrySpeed)
self["config"].list = self.entrylist
self["config"].l.setList(self.entrylist)
class AnimationSetupScreen(Screen):
animationSetupItems = [
{"idx":0, "name":_("Disable Animations")},
{"idx":1, "name":_("Simple fade")},
{"idx":2, "name":_("Grow drop")},
{"idx":3, "name":_("Grow from left")},
{"idx":4, "name":_("Popup")},
{"idx":5, "name":_("Slide drop")},
{"idx":6, "name":_("Slide left to right")},
{"idx":7, "name":_("Slide top to bottom")},
{"idx":8, "name":_("Stripes")},
]
skin = """
<screen name="AnimationSetup" position="center,center" size="580,400" title="Animation Setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" zPosition="1" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" zPosition="1" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" zPosition="1" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" zPosition="1" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="2" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="2" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="2" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="2" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="10,60" size="560,364" scrollbarMode="showOnDemand" />
<widget source="introduction" render="Label" position="0,370" size="560,40" zPosition="10" font="Regular;20" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session):
self.skin = AnimationSetupScreen.skin
Screen.__init__(self, session)
self.animationList = []
self["introduction"] = StaticText(_("* current animation"))
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["key_yellow"] = StaticText(_("Setting"))
self["key_blue"] = StaticText(_("Preview"))
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"cancel": self.keyclose,
"save": self.ok,
"ok" : self.ok,
"yellow": self.config,
"blue": self.preview
}, -3)
self["list"] = MenuList(self.animationList)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
l = []
for x in self.animationSetupItems:
key = x.get("idx", 0)
name = x.get("name", "??")
if key == config.misc.window_animation_default.value:
name = "* %s" % (name)
l.append( (name, key) )
self["list"].setList(l)
def ok(self):
current = self["list"].getCurrent()
if current:
key = current[1]
config.misc.window_animation_default.value = key
config.misc.window_animation_default.save()
setAnimation_current(key)
self.close()
def keyclose(self):
setAnimation_current(config.misc.window_animation_default.value)
setAnimation_speed(int(config.misc.window_animation_speed.value))
self.close()
def config(self):
self.session.open(AnimationSetupConfig)
def preview(self):
current = self["list"].getCurrent()
if current:
global g_animation_paused
tmp = g_animation_paused
g_animation_paused = False
setAnimation_current(current[1])
self.session.open(MessageBox, current[0], MessageBox.TYPE_INFO, timeout=3)
g_animation_paused = tmp
def checkAttrib(self, paused):
global g_animation_paused
if g_animation_paused is paused and self.skinAttributes is not None:
for (attr, value) in self.skinAttributes:
if attr == "animationPaused" and value in ("1", "on"):
return True
return False
def screen_show(self):
global g_animation_paused
if g_animation_paused:
setAnimation_current(0)
g_orig_show(self)
if checkAttrib(self, False):
g_animation_paused = True
def screen_doClose(self):
global g_animation_paused
if checkAttrib(self, True):
g_animation_paused = False
setAnimation_current(config.misc.window_animation_default.value)
g_orig_doClose(self)
def animationSetupMain(session, **kwargs):
session.open(AnimationSetupScreen)
def startAnimationSetup(menuid):
if menuid != "osd_menu":
return []
return [( _("Animations"), animationSetupMain, "animation_setup", None)]
def sessionAnimationSetup(session, reason, **kwargs):
setAnimation_current(config.misc.window_animation_default.value)
setAnimation_speed(int(config.misc.window_animation_speed.value))
global g_orig_show, g_orig_doClose
if g_orig_show is None:
g_orig_show = Screen.show
if g_orig_doClose is None:
g_orig_doClose = Screen.doClose
Screen.show = screen_show
Screen.doClose = screen_doClose
def Plugins(**kwargs):
plugin_list = [
PluginDescriptor(
name = "Animations",
description = "Setup UI animations",
where = PluginDescriptor.WHERE_MENU,
needsRestart = False,
fnc = startAnimationSetup),
PluginDescriptor(
where = PluginDescriptor.WHERE_SESSIONSTART,
needsRestart = False,
fnc = sessionAnimationSetup),
]
return plugin_list;
|
jshum/dd-agent
|
refs/heads/master
|
checks.d/nagios.py
|
27
|
# stdlib
from collections import namedtuple
import re
# project
from checks import AgentCheck
from utils.tailfile import TailFile
# fields order for each event type, as named tuples
EVENT_FIELDS = {
'CURRENT HOST STATE': namedtuple('E_CurrentHostState', 'host, event_state, event_soft_hard, return_code, payload'),
'CURRENT SERVICE STATE': namedtuple('E_CurrentServiceState', 'host, check_name, event_state, event_soft_hard, return_code, payload'),
'SERVICE ALERT': namedtuple('E_ServiceAlert', 'host, check_name, event_state, event_soft_hard, return_code, payload'),
'PASSIVE SERVICE CHECK': namedtuple('E_PassiveServiceCheck', 'host, check_name, return_code, payload'),
'HOST ALERT': namedtuple('E_HostAlert', 'host, event_state, event_soft_hard, return_code, payload'),
# [1305744274] SERVICE NOTIFICATION: ops;ip-10-114-237-165;Metric ETL;ACKNOWLEDGEMENT (CRITICAL);notify-service-by-email;HTTP CRITICAL: HTTP/1.1 503 Service Unavailable - 394 bytes in 0.010 second response time;datadog;alq
'SERVICE NOTIFICATION': namedtuple('E_ServiceNotification', 'contact, host, check_name, event_state, notification_type, payload'),
# [1296509331] SERVICE FLAPPING ALERT: ip-10-114-97-27;cassandra JVM Heap;STARTED; Service appears to have started flapping (23.4% change >= 20.0% threshold)
# [1296662511] SERVICE FLAPPING ALERT: ip-10-114-97-27;cassandra JVM Heap;STOPPED; Service appears to have stopped flapping (3.8% change < 5.0% threshold)
'SERVICE FLAPPING ALERT': namedtuple('E_FlappingAlert', 'host, check_name, flap_start_stop, payload'),
# Reference for external commands: http://old.nagios.org/developerinfo/externalcommands/commandlist.php
# Command Format:
# ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;<sticky>;<notify>;<persistent>;<author>;<comment>
# [1305832665] EXTERNAL COMMAND: ACKNOWLEDGE_SVC_PROBLEM;ip-10-202-161-236;Resources ETL;2;1;0;datadog;alq checking
'ACKNOWLEDGE_SVC_PROBLEM': namedtuple('E_ServiceAck', 'host, check_name, sticky_ack, notify_ack, persistent_ack, ack_author, payload'),
# Command Format:
# ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent>;<author>;<comment>
'ACKNOWLEDGE_HOST_PROBLEM': namedtuple('E_HostAck', 'host, sticky_ack, notify_ack, persistent_ack, ack_author, payload'),
# Comment Format:
# PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<result_code>;<comment>
# We ignore it because Nagios will log a "PASSIVE SERVICE CHECK" after
# receiving this, and we don't want duplicate events to be counted.
'PROCESS_SERVICE_CHECK_RESULT': False,
# Host Downtime
# [1297894825] HOST DOWNTIME ALERT: ip-10-114-89-59;STARTED; Host has entered a period of scheduled downtime
# [1297894825] SERVICE DOWNTIME ALERT: ip-10-114-237-165;intake;STARTED; Service has entered a period of scheduled downtime
'HOST DOWNTIME ALERT': namedtuple('E_HostDowntime', 'host, downtime_start_stop, payload'),
'SERVICE DOWNTIME ALERT': namedtuple('E_ServiceDowntime', 'host, check_name, downtime_start_stop, payload'),
}
# Regex for the Nagios event log
RE_LINE_REG = re.compile('^\[(\d+)\] EXTERNAL COMMAND: (\w+);(.*)$')
RE_LINE_EXT = re.compile('^\[(\d+)\] ([^:]+): (.*)$')
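# Illustrative sketch (not part of the original check): how a raw Nagios log
# line is split into a timestamp, an event type and the matching namedtuple.
# The sample line is the flapping-alert example from the comments above; the
# helper is never called by the check itself.
def _example_parse_event_line():
    line = ("[1296509331] SERVICE FLAPPING ALERT: ip-10-114-97-27;"
            "cassandra JVM Heap;STARTED; Service appears to have started "
            "flapping (23.4% change >= 20.0% threshold)")
    tstamp, event_type, remainder = RE_LINE_EXT.match(line).groups()
    fields = EVENT_FIELDS[event_type]       # E_FlappingAlert namedtuple
    parts = [p.strip() for p in remainder.split(';')][:len(fields._fields)]
    return int(tstamp), event_type, fields._make(parts)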
class Nagios(AgentCheck):
NAGIOS_CONF_KEYS = [
re.compile('^(?P<key>log_file)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>host_perfdata_file_template)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>service_perfdata_file_template)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>host_perfdata_file)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>service_perfdata_file)\s*=\s*(?P<value>.+)$'),
]
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.nagios_tails = {}
check_freq = init_config.get("check_freq", 15)
if instances is not None:
for instance in instances:
tailers = []
nagios_conf = {}
instance_key = None
if 'nagios_conf' in instance: # conf.d check
conf_path = instance['nagios_conf']
nagios_conf = self.parse_nagios_config(conf_path)
instance_key = conf_path
# Retrocompatibility Code
elif 'nagios_perf_cfg' in instance:
conf_path = instance['nagios_perf_cfg']
nagios_conf = self.parse_nagios_config(conf_path)
instance["collect_host_performance_data"] = True
instance["collect_service_performance_data"] = True
instance_key = conf_path
if 'nagios_log' in instance:
nagios_conf["log_file"] = instance['nagios_log']
if instance_key is None:
instance_key = instance['nagios_log']
# End of retrocompatibility code
if not nagios_conf:
self.log.warning("Missing path to nagios_conf")
continue
if 'log_file' in nagios_conf and \
instance.get('collect_events', True):
self.log.debug("Starting to tail the event log")
tailers.append(NagiosEventLogTailer(
log_path=nagios_conf['log_file'],
file_template=None,
logger=self.log,
hostname=self.hostname,
event_func=self.event,
gauge_func=self.gauge,
freq=check_freq,
passive_checks=instance.get('passive_checks_events', False)))
if 'host_perfdata_file' in nagios_conf and \
'host_perfdata_file_template' in nagios_conf and \
instance.get('collect_host_performance_data', False):
self.log.debug("Starting to tail the host_perfdata file")
tailers.append(NagiosHostPerfDataTailer(
log_path=nagios_conf['host_perfdata_file'],
file_template=nagios_conf['host_perfdata_file_template'],
logger=self.log,
hostname=self.hostname,
event_func=self.event,
gauge_func=self.gauge,
freq=check_freq))
if 'service_perfdata_file' in nagios_conf and \
'service_perfdata_file_template' in nagios_conf and \
instance.get('collect_service_performance_data', False):
self.log.debug("Starting to tail the service_perfdata file")
tailers.append(NagiosServicePerfDataTailer(
log_path=nagios_conf['service_perfdata_file'],
file_template=nagios_conf['service_perfdata_file_template'],
logger=self.log,
hostname=self.hostname,
event_func=self.event,
gauge_func=self.gauge,
freq=check_freq))
self.nagios_tails[instance_key] = tailers
def parse_nagios_config(self, filename):
output = {}
f = None
try:
f = open(filename)
for line in f:
line = line.strip()
if not line:
continue
for key in self.NAGIOS_CONF_KEYS:
m = key.match(line)
if m:
output[m.group('key')] = m.group('value')
break
return output
except Exception as e:
# Can't parse, assume it's just not working
# Don't return an incomplete config
self.log.exception(e)
raise Exception("Could not parse Nagios config file")
finally:
if f is not None:
f.close()
def check(self, instance):
'''
Parse until the end of each tailer associated with this instance.
We match instance and tailers based on the path to the Nagios configuration file
Special case: Compatibility with the old conf when no conf file is specified
but the path to the event_log is given
'''
instance_key = instance.get('nagios_conf',
instance.get('nagios_perf_cfg',
instance.get('nagios_log',
None)))
# Bad configuration: This instance does not contain any necessary configuration
if not instance_key or instance_key not in self.nagios_tails:
raise Exception('No Nagios configuration file specified')
for tailer in self.nagios_tails[instance_key]:
tailer.check()
class NagiosTailer(object):
def __init__(self, log_path, file_template, logger, hostname, event_func, gauge_func, freq):
'''
:param log_path: string, path to the file to parse
:param file_template: string, format of the perfdata file
:param logger: Logger object
:param hostname: string, name of the host this agent is running on
:param event_func: function to create event, should accept dict
:param gauge_func: function to report a gauge
:param freq: int, size of bucket to aggregate perfdata metrics
'''
self.log_path = log_path
self.log = logger
self.gen = None
self.tail = None
self.hostname = hostname
self._event = event_func
self._gauge = gauge_func
self._line_parsed = 0
self._freq = freq
if file_template is not None:
self.compile_file_template(file_template)
self.tail = TailFile(self.log, self.log_path, self._parse_line)
self.gen = self.tail.tail(line_by_line=False, move_end=True)
self.gen.next()
def check(self):
self._line_parsed = 0
# read until the end of file
try:
self.log.debug("Start nagios check for file %s" % (self.log_path))
self.gen.next()
self.log.debug("Done nagios check for file %s (parsed %s line(s))" %
(self.log_path, self._line_parsed))
except StopIteration, e:
self.log.exception(e)
self.log.warning("Can't tail %s file" % (self.log_path))
def compile_file_template(self, file_template):
try:
# Escape characters that will be interpreted as regex bits
# e.g. [ and ] in "[SERVICEPERFDATA]"
regex = re.sub(r'[[\]*]', r'.', file_template)
regex = re.sub(r'\$([^\$]*)\$', r'(?P<\1>[^\$]*)', regex)
self.line_pattern = re.compile(regex)
except Exception, e:
raise InvalidDataTemplate("%s (%s)" % (file_template, e))
class NagiosEventLogTailer(NagiosTailer):
def __init__(self, log_path, file_template, logger, hostname, event_func,
gauge_func, freq, passive_checks=False):
'''
:param log_path: string, path to the file to parse
:param file_template: string, format of the perfdata file
:param logger: Logger object
:param hostname: string, name of the host this agent is running on
:param event_func: function to create event, should accept dict
:param gauge_func: function to report a gauge
:param freq: int, size of bucket to aggregate perfdata metrics
:param passive_checks: bool, enable or not passive checks events
'''
self.passive_checks = passive_checks
super(NagiosEventLogTailer, self).__init__(
log_path, file_template,
logger, hostname, event_func, gauge_func, freq
)
def _parse_line(self, line):
"""Actual nagios parsing
Return True if we found an event, False otherwise
"""
# first isolate the timestamp and the event type
try:
self._line_parsed = self._line_parsed + 1
m = RE_LINE_REG.match(line)
if m is None:
m = RE_LINE_EXT.match(line)
if m is None:
return False
self.log.debug("Matching line found %s" % line)
(tstamp, event_type, remainder) = m.groups()
tstamp = int(tstamp)
            # skip passive check reports by default because they are spammy
if event_type == 'PASSIVE SERVICE CHECK' and not self.passive_checks:
return False
# then retrieve the event format for each specific event type
fields = EVENT_FIELDS.get(event_type, None)
if fields is None:
self.log.warning("Ignoring unknown nagios event for line: %s" % (line[:-1]))
return False
elif fields is False:
# Ignore and skip
self.log.debug("Ignoring Nagios event for line: %s" % (line[:-1]))
return False
# and parse the rest of the line
parts = map(lambda p: p.strip(), remainder.split(';'))
# Chop parts we don't recognize
parts = parts[:len(fields._fields)]
event = self.create_event(tstamp, event_type, self.hostname, fields._make(parts))
self._event(event)
self.log.debug("Nagios event: %s" % (event))
return True
except Exception:
self.log.exception("Unable to create a nagios event from line: [%s]" % (line))
return False
def create_event(self, timestamp, event_type, hostname, fields):
"""Factory method called by the parsers
"""
d = fields._asdict()
d.update({'timestamp': timestamp,
'event_type': event_type})
# if host is localhost, turn that into the internal host name
host = d.get('host', None)
if host == "localhost":
d["host"] = hostname
return d
class NagiosPerfDataTailer(NagiosTailer):
perfdata_field = '' # Should be overriden by subclasses
metric_prefix = 'nagios'
pair_pattern = re.compile(r"".join([
r"'?(?P<label>[^=']+)'?=",
r"(?P<value>[-0-9.]+)",
r"(?P<unit>s|us|ms|%|B|KB|MB|GB|TB|c)?",
r"(;(?P<warn>@?[-0-9.~]*:?[-0-9.~]*))?",
r"(;(?P<crit>@?[-0-9.~]*:?[-0-9.~]*))?",
r"(;(?P<min>[-0-9.]*))?",
r"(;(?P<max>[-0-9.]*))?",
]))
@staticmethod
def underscorize(s):
return s.replace(' ', '_').lower()
def _get_metric_prefix(self, data):
raise NotImplementedError()
def _parse_line(self, line):
matched = self.line_pattern.match(line)
output = []
if matched:
self.log.debug("Matching line found %s" % line)
data = matched.groupdict()
metric_prefix = self._get_metric_prefix(data)
            # Parse the perfdata values, which are a space-delimited list of:
# 'label'=value[UOM];[warn];[crit];[min];[max]
perf_data = data.get(self.perfdata_field, '').split(' ')
for pair in perf_data:
pair_match = self.pair_pattern.match(pair)
if not pair_match:
continue
else:
pair_data = pair_match.groupdict()
label = pair_data['label']
timestamp = data.get('TIMET', None)
if timestamp is not None:
timestamp = (int(float(timestamp)) / self._freq) * self._freq
value = float(pair_data['value'])
device_name = None
if '/' in label:
# Special case: if the label begins
# with a /, treat the label as the device
# and use the metric prefix as the metric name
metric = '.'.join(metric_prefix)
device_name = label
else:
# Otherwise, append the label to the metric prefix
# and use that as the metric name
metric = '.'.join(metric_prefix + [label])
host_name = data.get('HOSTNAME', self.hostname)
optional_keys = ['unit', 'warn', 'crit', 'min', 'max']
tags = []
for key in optional_keys:
attr_val = pair_data.get(key, None)
if attr_val is not None and attr_val != '':
tags.append("{0}:{1}".format(key, attr_val))
self._gauge(metric, value, tags, host_name, device_name, timestamp)
class NagiosHostPerfDataTailer(NagiosPerfDataTailer):
perfdata_field = 'HOSTPERFDATA'
def _get_metric_prefix(self, line_data):
return [self.metric_prefix, 'host']
class NagiosServicePerfDataTailer(NagiosPerfDataTailer):
perfdata_field = 'SERVICEPERFDATA'
def _get_metric_prefix(self, line_data):
metric = [self.metric_prefix]
middle_name = line_data.get('SERVICEDESC', None)
if middle_name:
metric.append(middle_name.replace(' ', '_').lower())
return metric
class InvalidDataTemplate(Exception):
pass
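# Illustrative sketch (not part of the original check): what a single perfdata
# pair looks like once NagiosPerfDataTailer.pair_pattern has matched it.  The
# sample string follows the 'label'=value[UOM];[warn];[crit];[min];[max] layout
# described in _parse_line and is not taken from a real perfdata file.
def _example_parse_perfdata_pair():
    pair = "time=0.06s;5.0;10.0;0.0;60.0"
    data = NagiosPerfDataTailer.pair_pattern.match(pair).groupdict()
    # -> label='time', value='0.06', unit='s', warn='5.0', crit='10.0',
    #    min='0.0', max='60.0'
    return data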
|
conejoninja/pelisalacarta
|
refs/heads/master
|
python/main-classic/servers/mailru.py
|
9
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for mail.ru
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import jsontools
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[mailru.py] get_video_url(page_url='%s')" % (page_url))
video_urls = []
    ## Load the page
    ## New url at the end of the data
    data = scrapertools.cache_page(page_url)
    ## Load the new data from the new url
#<a href="http://r.mail.ru/clb15944866/my.mail.ru/mail/gottsu04/video/_myvideo/709.html?from=watchonmailru" class="b-player__button" target="_blank">Watch video</a>
url = scrapertools.get_match(data,'<a href="([^"]+)" class="b-player__button" target="_blank">Watch video</a>')
data = scrapertools.cache_page(url)
## API ##
    ## The video id is needed to build the API url
#<link rel="image_src" href="http://filed9-14.my.mail.ru/pic?url=http%3A%2F%2Fvideoapi.my.mail.ru%2Ffile%2Fsc03%2F3450622080461046469&mw=&mh=&sig=5d50e747aa59107d805263043e3efe64" />
id_api_video = scrapertools.get_match(data,'sc\d+%2F([^&]+)&mw')
url = "http://videoapi.my.mail.ru/videos/" + id_api_video + ".json"
    ## Load the data and the headers
data, headers = scrapertools.read_body_and_headers(url)
data = jsontools.load_json( data )
    ## The video_key cookie is needed to watch the video
for cookie in headers:
if 'set-cookie' in cookie: break
cookie_video_key = scrapertools.get_match(cookie[1], '(video_key=[a-f0-9]+)')
    ## Build the video url + video_key cookie
media_url = data['videos'][0]['url'] + "|Cookie=" + cookie_video_key
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + " [mail.ru]", media_url ] )
for video_url in video_urls:
logger.info("[mail.ru] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find videos from this server in the given text
def find_videos(data):
logger.info("[mailru.py] find_videos") #(data='%s')" % (data))
encontrados = set()
devuelve = []
# http://videoapi.my.mail.ru/videos/embed/mail/bartos1100/_myvideo/1136.html
patronvideos = 'videoapi.my.mail.ru/videos/embed/mail/([a-zA-Z0-9]+)/_myvideo/(\d+).html'
logger.info("[mailru.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[mail.ru]"
url = "http://videoapi.my.mail.ru/videos/embed/mail/"+match[0]+"/_myvideo/"+match[1]+".html"
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'mailru' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
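# Illustrative sketch (not part of the original connector): find_videos() only
# needs a blob of page data containing a mail.ru embed url.  The sample url is
# the one quoted in the comment above and is used purely as an example.
def _example_find_videos():
    sample = ('<iframe src="http://videoapi.my.mail.ru/videos/embed/mail/'
              'bartos1100/_myvideo/1136.html"></iframe>')
    # Returns [ [ "[mail.ru]", url, 'mailru' ] ] for every new url found.
    return find_videos(sample)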
|
SergioML9/emotion_simulation
|
refs/heads/master
|
configuration/model_settings.py
|
2
|
import configuration.automation_settings as automation_settings
overtime_contribution = 0.021
rest_time_contribution = 0.016
email_reception_contribution = 0.0029
ambient_contribution = 0.0012
noise_contribution = 0.03
luminosity_contibution = 0.000153
if automation_settings.automate_tasks: tasks_automation_contribution = 0.25
else: tasks_automation_contribution = 0
|
rcbops/nova-buildpackage
|
refs/heads/master
|
nova/tests/test_virt.py
|
14
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import driver
FLAGS = flags.FLAGS
class TestVirtDriver(test.TestCase):
def test_block_device(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
empty_block_device_info = {}
self.assertEqual(
driver.block_device_info_get_root(block_device_info), '/dev/sda')
self.assertEqual(
driver.block_device_info_get_root(empty_block_device_info), None)
self.assertEqual(
driver.block_device_info_get_root(None), None)
self.assertEqual(
driver.block_device_info_get_swap(block_device_info), swap)
self.assertEqual(driver.block_device_info_get_swap(
empty_block_device_info)['device_name'], None)
self.assertEqual(driver.block_device_info_get_swap(
empty_block_device_info)['swap_size'], 0)
self.assertEqual(
driver.block_device_info_get_swap({'swap': None})['device_name'],
None)
self.assertEqual(
driver.block_device_info_get_swap({'swap': None})['swap_size'],
0)
self.assertEqual(
driver.block_device_info_get_swap(None)['device_name'], None)
self.assertEqual(
driver.block_device_info_get_swap(None)['swap_size'], 0)
self.assertEqual(
driver.block_device_info_get_ephemerals(block_device_info),
ephemerals)
self.assertEqual(
driver.block_device_info_get_ephemerals(empty_block_device_info),
[])
self.assertEqual(
driver.block_device_info_get_ephemerals(None),
[])
def test_swap_is_usable(self):
self.assertFalse(driver.swap_is_usable(None))
self.assertFalse(driver.swap_is_usable({'device_name': None}))
self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 0}))
self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 1}))
|
ParthGanatra/mitmproxy
|
refs/heads/master
|
mitmproxy/platform/windows.py
|
3
|
import configargparse
import cPickle as pickle
from ctypes import byref, windll, Structure
from ctypes.wintypes import DWORD
import os
import socket
import SocketServer
import struct
import threading
import time
from collections import OrderedDict
from pydivert.windivert import WinDivert
from pydivert.enum import Direction, Layer, Flag
PROXY_API_PORT = 8085
class Resolver(object):
def __init__(self):
TransparentProxy.setup()
self.socket = None
self.lock = threading.RLock()
self._connect()
def _connect(self):
if self.socket:
self.socket.close()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(("127.0.0.1", PROXY_API_PORT))
self.wfile = self.socket.makefile('wb')
self.rfile = self.socket.makefile('rb')
pickle.dump(os.getpid(), self.wfile)
def original_addr(self, csock):
client = csock.getpeername()[:2]
with self.lock:
try:
pickle.dump(client, self.wfile)
self.wfile.flush()
addr = pickle.load(self.rfile)
if addr is None:
raise RuntimeError("Cannot resolve original destination.")
addr = list(addr)
addr[0] = str(addr[0])
addr = tuple(addr)
return addr
except (EOFError, socket.error):
self._connect()
return self.original_addr(csock)
class APIRequestHandler(SocketServer.StreamRequestHandler):
"""
TransparentProxy API: Returns the pickled server address, port tuple
for each received pickled client address, port tuple.
"""
def handle(self):
proxifier = self.server.proxifier
pid = None
try:
pid = pickle.load(self.rfile)
if pid is not None:
proxifier.trusted_pids.add(pid)
while True:
client = pickle.load(self.rfile)
server = proxifier.client_server_map.get(client, None)
pickle.dump(server, self.wfile)
self.wfile.flush()
except (EOFError, socket.error):
proxifier.trusted_pids.discard(pid)
class APIServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, proxifier, *args, **kwargs):
SocketServer.TCPServer.__init__(self, *args, **kwargs)
self.proxifier = proxifier
self.daemon_threads = True
# Windows error.h
ERROR_INSUFFICIENT_BUFFER = 0x7A
# http://msdn.microsoft.com/en-us/library/windows/desktop/bb485761(v=vs.85).aspx
class MIB_TCPROW2(Structure):
_fields_ = [
('dwState', DWORD),
('dwLocalAddr', DWORD),
('dwLocalPort', DWORD),
('dwRemoteAddr', DWORD),
('dwRemotePort', DWORD),
('dwOwningPid', DWORD),
('dwOffloadState', DWORD)
]
# http://msdn.microsoft.com/en-us/library/windows/desktop/bb485772(v=vs.85).aspx
def MIB_TCPTABLE2(size):
class _MIB_TCPTABLE2(Structure):
_fields_ = [('dwNumEntries', DWORD),
('table', MIB_TCPROW2 * size)]
return _MIB_TCPTABLE2()
class TransparentProxy(object):
"""
Transparent Windows Proxy for mitmproxy based on WinDivert/PyDivert.
Requires elevated (admin) privileges. Can be started separately by manually running the file.
This module can be used to intercept and redirect all traffic that is forwarded by the user's machine and
traffic sent from the machine itself.
How it works:
    (1) First, we intercept all packets that match our filter (destination port 80 and 443 by default).
    We consider both traffic that is forwarded by the OS (WinDivert's NETWORK_FORWARD layer) and traffic
    sent from the local machine (WinDivert's NETWORK layer). In the case of traffic from the local machine, we need to
    distinguish between traffic sent from applications and traffic sent from the proxy. To accomplish this, we use
Windows' GetTcpTable2 syscall to determine the source application's PID.
    For each intercepted packet, we
    1. Store the source -> destination mapping (address and port)
    2. Remove the packet from the network (by not reinjecting it).
    3. Re-inject the packet into the local network stack, but with the destination address changed to the proxy.
(2) Next, the proxy receives the forwarded packet, but does not know the real destination yet (which we overwrote
with the proxy's address). On Linux, we would now call getsockopt(SO_ORIGINAL_DST), but that unfortunately doesn't
work on Windows. However, we still have the correct source information. As a workaround, we now access the forward
module's API (see APIRequestHandler), submit the source information and get the actual destination back (which the
forward module stored in (1.3)).
    (3) The proxy now establishes the upstream connection as usual.
(4) Finally, the proxy sends the response back to the client. To make it work, we need to change the packet's source
address back to the original destination (using the mapping from (1.3)), to which the client believes he is talking
to.
Limitations:
- No IPv6 support. (Pull Requests welcome)
    - TCP ports do not get re-used simultaneously on the client, i.e. the proxy will fail if application X
connects to example.com and example.org from 192.168.0.42:4242 simultaneously. This could be mitigated by
introducing unique "meta-addresses" which mitmproxy sees, but this would remove the correct client info from
mitmproxy.
"""
def __init__(self,
mode="both",
redirect_ports=(80, 443), custom_filter=None,
proxy_addr=False, proxy_port=8080,
api_host="localhost", api_port=PROXY_API_PORT,
cache_size=65536):
"""
:param mode: Redirection operation mode: "forward" to only redirect forwarded packets, "local" to only redirect
packets originating from the local machine, "both" to redirect both.
:param redirect_ports: if the destination port is in this tuple, the requests are redirected to the proxy.
:param custom_filter: specify a custom WinDivert filter to select packets that should be intercepted. Overrides
redirect_ports setting.
:param proxy_addr: IP address of the proxy (IP within a network, 127.0.0.1 does not work). By default,
this is detected automatically.
        :param proxy_port: Port the proxy is listening on.
:param api_host: Host the forward module API is listening on.
:param api_port: Port the forward module API is listening on.
:param cache_size: Maximum number of connection tuples that are stored. Only relevant in very high
load scenarios.
"""
if proxy_port in redirect_ports:
raise ValueError("The proxy port must not be a redirect port.")
if not proxy_addr:
# Auto-Detect local IP.
# https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
proxy_addr = s.getsockname()[0]
s.close()
self.mode = mode
self.proxy_addr, self.proxy_port = proxy_addr, proxy_port
self.connection_cache_size = cache_size
self.client_server_map = OrderedDict()
self.api = APIServer(self, (api_host, api_port), APIRequestHandler)
self.api_thread = threading.Thread(target=self.api.serve_forever)
self.api_thread.daemon = True
self.driver = WinDivert()
self.driver.register()
self.request_filter = custom_filter or " or ".join(
("tcp.DstPort == %d" %
p) for p in redirect_ports)
self.request_forward_handle = None
self.request_forward_thread = threading.Thread(
target=self.request_forward)
self.request_forward_thread.daemon = True
self.addr_pid_map = dict()
self.trusted_pids = set()
self.tcptable2 = MIB_TCPTABLE2(0)
self.tcptable2_size = DWORD(0)
self.request_local_handle = None
self.request_local_thread = threading.Thread(target=self.request_local)
self.request_local_thread.daemon = True
# The proxy server responds to the client. To the client,
# this response should look like it has been sent by the real target
self.response_filter = "outbound and tcp.SrcPort == %d" % proxy_port
self.response_handle = None
self.response_thread = threading.Thread(target=self.response)
self.response_thread.daemon = True
self.icmp_handle = None
@classmethod
def setup(cls):
# TODO: Make sure that server can be killed cleanly. That's a bit difficult as we don't have access to
# controller.should_exit when this is called.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_unavailable = s.connect_ex(("127.0.0.1", PROXY_API_PORT))
if server_unavailable:
proxifier = TransparentProxy()
proxifier.start()
def start(self):
self.api_thread.start()
# Block all ICMP requests (which are sent on Windows by default).
# In layman's terms: If we don't do this, our proxy machine tells the client that it can directly connect to the
# real gateway if they are on the same network.
self.icmp_handle = self.driver.open_handle(
filter="icmp",
layer=Layer.NETWORK,
flags=Flag.DROP)
self.response_handle = self.driver.open_handle(
filter=self.response_filter,
layer=Layer.NETWORK)
self.response_thread.start()
if self.mode == "forward" or self.mode == "both":
self.request_forward_handle = self.driver.open_handle(
filter=self.request_filter,
layer=Layer.NETWORK_FORWARD)
self.request_forward_thread.start()
if self.mode == "local" or self.mode == "both":
self.request_local_handle = self.driver.open_handle(
filter=self.request_filter,
layer=Layer.NETWORK)
self.request_local_thread.start()
def shutdown(self):
if self.mode == "local" or self.mode == "both":
self.request_local_handle.close()
if self.mode == "forward" or self.mode == "both":
self.request_forward_handle.close()
self.response_handle.close()
self.icmp_handle.close()
self.api.shutdown()
def recv(self, handle):
"""
Convenience function that receives a packet from the passed handler and handles error codes.
If the process has been shut down, (None, None) is returned.
"""
try:
raw_packet, metadata = handle.recv()
return self.driver.parse_packet(raw_packet), metadata
except WindowsError as e:
if e.winerror == 995:
return None, None
else:
raise
def fetch_pids(self):
ret = windll.iphlpapi.GetTcpTable2(
byref(
self.tcptable2), byref(
self.tcptable2_size), 0)
if ret == ERROR_INSUFFICIENT_BUFFER:
self.tcptable2 = MIB_TCPTABLE2(self.tcptable2_size.value)
self.fetch_pids()
elif ret == 0:
for row in self.tcptable2.table[:self.tcptable2.dwNumEntries]:
local = (
socket.inet_ntoa(struct.pack('L', row.dwLocalAddr)),
socket.htons(row.dwLocalPort)
)
self.addr_pid_map[local] = row.dwOwningPid
else:
raise RuntimeError("Unknown GetTcpTable2 return code: %s" % ret)
def request_local(self):
while True:
packet, metadata = self.recv(self.request_local_handle)
if not packet:
return
client = (packet.src_addr, packet.src_port)
if client not in self.addr_pid_map:
self.fetch_pids()
# If this fails, we most likely have a connection from an external client to
            # a local server on 80/443. In this case, we always want to proxy
# the request.
pid = self.addr_pid_map.get(client, None)
if pid not in self.trusted_pids:
self._request(packet, metadata)
else:
self.request_local_handle.send((packet.raw, metadata))
def request_forward(self):
"""
        Redirect packets to the proxy
"""
while True:
packet, metadata = self.recv(self.request_forward_handle)
if not packet:
return
self._request(packet, metadata)
def _request(self, packet, metadata):
# print(" * Redirect client -> server to proxy")
# print("%s:%s -> %s:%s" % (packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port))
client = (packet.src_addr, packet.src_port)
server = (packet.dst_addr, packet.dst_port)
if client in self.client_server_map:
# Force re-add to mark as "newest" entry in the dict.
del self.client_server_map[client]
while len(self.client_server_map) > self.connection_cache_size:
self.client_server_map.popitem(False)
self.client_server_map[client] = server
packet.dst_addr, packet.dst_port = self.proxy_addr, self.proxy_port
metadata.direction = Direction.INBOUND
packet = self.driver.update_packet_checksums(packet)
        # Use any handle that's on the NETWORK layer - request_local may be
# unavailable.
self.response_handle.send((packet.raw, metadata))
def response(self):
"""
        Spoof source address of packets sent from the proxy to the client
"""
while True:
packet, metadata = self.recv(self.response_handle)
if not packet:
return
# If the proxy responds to the client, let the client believe the target server sent the packets.
# print(" * Adjust proxy -> client")
client = (packet.dst_addr, packet.dst_port)
server = self.client_server_map.get(client, None)
if server:
packet.src_addr, packet.src_port = server
else:
print("Warning: Previously unseen connection from proxy to %s:%s." % client)
packet = self.driver.update_packet_checksums(packet)
self.response_handle.send((packet.raw, metadata))
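# Illustrative sketch (not part of the original module): the bookkeeping behind
# steps (1) and (2) of the TransparentProxy docstring.  The forward handler
# stores the client -> real-server mapping before rewriting the packet's
# destination to the proxy; the API later hands that mapping back to
# mitmproxy's Resolver.  The addresses below are made up.
def _example_redirect_bookkeeping():
    client = ("10.0.0.7", 52311)            # source of the intercepted packet
    server = ("93.184.216.34", 443)         # its real destination
    client_server_map = OrderedDict()
    client_server_map[client] = server      # step (1): remember the mapping
    # step (2): the proxy asks the API with the client tuple and gets the
    # original destination back, then connects upstream as usual.
    return client_server_map.get(client)    # -> ("93.184.216.34", 443)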
if __name__ == "__main__":
parser = configargparse.ArgumentParser(
description="Windows Transparent Proxy")
parser.add_argument(
'--mode',
choices=[
'forward',
'local',
'both'],
default="both",
help='redirection operation mode: "forward" to only redirect forwarded packets, '
'"local" to only redirect packets originating from the local machine')
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--redirect-ports",
nargs="+",
type=int,
default=[
80,
443],
metavar="80",
help="ports that should be forwarded to the proxy")
group.add_argument(
"--custom-filter",
default=None,
metavar="WINDIVERT_FILTER",
help="Custom WinDivert interception rule.")
parser.add_argument("--proxy-addr", default=False,
help="Proxy Server Address")
parser.add_argument("--proxy-port", type=int, default=8080,
help="Proxy Server Port")
parser.add_argument("--api-host", default="localhost",
help="API hostname to bind to")
parser.add_argument("--api-port", type=int, default=PROXY_API_PORT,
help="API port")
parser.add_argument("--cache-size", type=int, default=65536,
help="Maximum connection cache size")
options = parser.parse_args()
proxy = TransparentProxy(**vars(options))
proxy.start()
print(" * Transparent proxy active.")
print(" Filter: {0}".format(proxy.request_filter))
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print(" * Shutting down...")
proxy.shutdown()
print(" * Shut down.")
|
Ircam-Web/mezzanine-organization
|
refs/heads/master
|
organization/projects/migrations/0084_auto_20190304_2221.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-03-04 21:21
from __future__ import unicode_literals
from django.db import migrations
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('organization-projects', '0083_auto_20190221_1706'),
]
operations = [
migrations.AddField(
model_name='projectpage',
name='content_en',
field=mezzanine.core.fields.RichTextField(null=True, verbose_name='Content'),
),
migrations.AddField(
model_name='projectpage',
name='content_fr',
field=mezzanine.core.fields.RichTextField(null=True, verbose_name='Content'),
),
]
|
SimonSuster/lxmls-toolkit
|
refs/heads/master
|
lxmls/labs/day0.py
|
2
|
import numpy as np
import math
import matplotlib.pyplot as plt
## Exercise about gradient descent
def get_y(x):
value = pow((x+2),2) - 16*math.exp(-pow((x-2),2))
return value
def get_grad(x):
return (2*x+4)-16*(-2*x + 4)*np.exp(-((x-2)**2))
def gradient_descent(start_x,func,grad):
# Precision of the solution
prec = 0.0001
#Use a fixed small step size
step_size = 0.1
#max iterations
max_iter = 100
x_new = start_x
res = []
for i in xrange(max_iter):
x_old = x_new
#Use beta equal to -1 for gradient descent
x_new = x_old - step_size * get_grad(x_new)
f_x_new = get_y(x_new)
f_x_old = get_y(x_old)
res.append([x_new,f_x_new])
if(abs(f_x_new -f_x_old) < prec):
print "change in function values to small, leaving"
return np.array(res)
print "exceeded maximum number of iterations, leaving"
return np.array(res)
def show_optimization_exercise():
x = np.arange(-8,8,0.001)
y = map(lambda u: get_y(u),x)
plt.plot(x,y)
x_0 = -8
res = gradient_descent(x_0,get_y,get_grad)
plt.plot(res[:,0],res[:,1],'+')
x_0 = 8
res = gradient_descent(x_0,get_y,get_grad)
plt.plot(res[:,0],res[:,1],'*')
plt.show()
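# --- Editor's note: hedged addition, not part of the original exercise file. ---
# A quick sanity check for get_grad() is a central finite-difference
# approximation of get_y(); the two should agree to a few decimal places:
#
#     eps = 1e-6
#     x0 = 1.3
#     numeric = (get_y(x0 + eps) - get_y(x0 - eps)) / (2 * eps)
#     assert abs(numeric - get_grad(x0)) < 1e-4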
|
mbohlool/client-python
|
refs/heads/master
|
kubernetes/test/test_v1_local_subject_access_review.py
|
1
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_local_subject_access_review import V1LocalSubjectAccessReview
class TestV1LocalSubjectAccessReview(unittest.TestCase):
""" V1LocalSubjectAccessReview unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1LocalSubjectAccessReview(self):
"""
Test V1LocalSubjectAccessReview
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_local_subject_access_review.V1LocalSubjectAccessReview()
pass
if __name__ == '__main__':
unittest.main()
|
realgo/luigi
|
refs/heads/master
|
test/task_test.py
|
9
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import doctest
from helpers import unittest
from datetime import datetime, timedelta
import luigi
import luigi.task
from luigi.task_register import load_task
class DummyTask(luigi.Task):
param = luigi.Parameter()
bool_param = luigi.BoolParameter()
int_param = luigi.IntParameter()
float_param = luigi.FloatParameter()
date_param = luigi.DateParameter()
datehour_param = luigi.DateHourParameter()
timedelta_param = luigi.TimeDeltaParameter()
insignificant_param = luigi.Parameter(significant=False)
class DefaultInsignificantParamTask(luigi.Task):
insignificant_param = luigi.Parameter(significant=False, default='value')
necessary_param = luigi.Parameter(significant=False)
class TaskTest(unittest.TestCase):
def test_tasks_doctest(self):
doctest.testmod(luigi.task)
def test_task_to_str_to_task(self):
params = dict(
param='test',
bool_param=True,
int_param=666,
float_param=123.456,
date_param=datetime(2014, 9, 13).date(),
datehour_param=datetime(2014, 9, 13, 9),
timedelta_param=timedelta(44), # doesn't support seconds
insignificant_param='test')
original = DummyTask(**params)
other = DummyTask.from_str_params(original.to_str_params())
self.assertEqual(original, other)
def test_task_from_str_insignificant(self):
params = {'necessary_param': 'needed'}
original = DefaultInsignificantParamTask(**params)
other = DefaultInsignificantParamTask.from_str_params(params)
self.assertEqual(original, other)
def test_task_missing_necessary_param(self):
with self.assertRaises(luigi.parameter.MissingParameterException):
DefaultInsignificantParamTask.from_str_params({})
def test_external_tasks_loadable(self):
task = load_task("luigi", "ExternalTask", {})
assert(isinstance(task, luigi.ExternalTask))
def test_flatten(self):
flatten = luigi.task.flatten
self.assertEqual(sorted(flatten({'a': 'foo', 'b': 'bar'})), ['bar', 'foo'])
self.assertEqual(sorted(flatten(['foo', ['bar', 'troll']])), ['bar', 'foo', 'troll'])
self.assertEqual(flatten('foo'), ['foo'])
self.assertEqual(flatten(42), [42])
self.assertEqual(flatten((len(i) for i in ["foo", "troll"])), [3, 5])
self.assertRaises(TypeError, flatten, (len(i) for i in ["foo", "troll", None]))
if __name__ == '__main__':
unittest.main()
|
noogel/xyzStudyPython
|
refs/heads/master
|
tornado/translate_tornado_4_2_1/tornado/test/util.py
|
46
|
from __future__ import absolute_import, division, print_function, with_statement
import os
import socket
import sys
from tornado.testing import bind_unused_port
# Encapsulate the choice of unittest or unittest2 here.
# To be used as 'from tornado.test.util import unittest'.
if sys.version_info < (2, 7):
# In py26, we must always use unittest2.
import unittest2 as unittest
else:
# Otherwise, use whichever version of unittest was imported in
# tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipIfNoIPv6 = unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
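# --- Editor's note: hedged usage sketch, not part of the original module. ---
# refusing_port() gives tests a port that is guaranteed to refuse connections;
# a typical (illustrative) use looks like:
#
#     cleanup, port = refusing_port()
#     try:
#         socket.create_connection(("127.0.0.1", port), timeout=1)  # expected to fail
#     except IOError:
#         pass
#     finally:
#         cleanup()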
|
sjohannes/exaile
|
refs/heads/gdbus
|
xlgui/widgets/smart_playlist_editor.py
|
1
|
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import Gtk
from xl import main, playlist, settings
from xl.nls import gettext as _
from . import dialogs
from .filter import (
EntryField,
FilterDialog,
ComboEntryField,
MultiEntryField,
NullField,
QuotedEntryField,
SpinButtonAndComboField,
SpinLabelField,
)
import logging
logger = logging.getLogger(__name__)
def N_(x):
return x
class EntrySecondsField(MultiEntryField):
def __init__(self):
MultiEntryField.__init__(self, (50, _('seconds')))
class EntryAndEntryField(MultiEntryField):
def __init__(self):
# TRANSLATORS: Logical AND used for smart playlists
MultiEntryField.__init__(self, (50, _('and'), 50))
class EntryDaysField(MultiEntryField):
def __init__(self):
MultiEntryField.__init__(self, (50, _('days')))
class PlaylistField(ComboEntryField):
def __init__(self):
playlists = []
playlists.extend(main.exaile().smart_playlists.list_playlists())
playlists.extend(main.exaile().playlists.list_playlists())
playlists.sort()
ComboEntryField.__init__(self, playlists)
DATE_FIELDS = [N_('seconds'), N_('minutes'), N_('hours'), N_('days'), N_('weeks')]
class SpinDateField(SpinButtonAndComboField):
def __init__(self):
SpinButtonAndComboField.__init__(self, DATE_FIELDS)
class SpinSecondsField(SpinLabelField):
def __init__(self):
SpinLabelField.__init__(self, _('seconds'))
class SpinRating(SpinLabelField):
def __init__(self):
SpinLabelField.__init__(self, '', settings.get_option("rating/maximum", 5), 0)
class SpinNothing(SpinLabelField):
def __init__(self):
SpinLabelField.__init__(self, '')
# This sets up the CRITERIA for all the available types of tags
# that exaile supports. The actual CRITERIA dict is populated
# using xl.metadata.tags.tag_data.
#
# NOTE: The following strings are already marked for translation in _TRANS and
# _NMAP, and will be really translated by filtergui; no need to clutter the
# code here.
_criteria_types = {
# fmt: off
# TODO
'bitrate': [
('is', SpinNothing),
('less than', SpinNothing),
('greater than', SpinNothing),
('between', EntryAndEntryField),
('at least', SpinNothing),
('at most', SpinNothing),
('is set', NullField),
('is not set', NullField),
],
'image': None,
'int': [
('is', SpinNothing),
('less than', SpinNothing),
('greater than', SpinNothing),
('between', EntryAndEntryField),
('at least', SpinNothing),
('at most', SpinNothing),
('is set', NullField),
('is not set', NullField),
],
'location': [
('is', QuotedEntryField),
('is not', QuotedEntryField),
('contains', QuotedEntryField),
('does not contain', QuotedEntryField),
('regex', QuotedEntryField),
('not regex', QuotedEntryField),
],
'text': [
('is', EntryField),
('is not', EntryField),
('contains', EntryField),
('does not contain', EntryField),
('contains word', EntryField),
('does not contain word', EntryField),
('regex', EntryField),
('not regex', EntryField),
('is set', NullField),
('is not set', NullField),
],
'time': [
('at least', SpinSecondsField),
('at most', SpinSecondsField),
('is', SpinSecondsField),
('is not', SpinSecondsField),
],
'timestamp': [
('in the last', SpinDateField),
('not in the last', SpinDateField),
],
'__playlist': [
('Track is in', PlaylistField),
('Track not in', PlaylistField),
],
'__rating': [
('greater than', SpinRating),
('less than', SpinRating),
('at least', SpinRating),
('at most', SpinRating),
],
# fmt: on
}
# aliases
_criteria_types['datetime'] = _criteria_types['text'] # TODO: fix
_criteria_types['multiline'] = _criteria_types['text']
_criteria_types['dblnum'] = _criteria_types['int']
# This gets populated below. Only add special tags/searches that don't have a
# valid entry in tag_data
CRITERIA = [
('Rating', _criteria_types['__rating']),
('Playlist', _criteria_types['__playlist']),
]
# NOTE: We use N_ (fake gettext) because these strings are translated later by
# the filter GUI. If we use _ (real gettext) here, filtergui will try to
# translate already-translated strings, which makes no sense. This is partly due
# to the old design of storing untranslated strings (instead of operators) in
# the dynamic playlist database.
_TRANS = {
# TRANSLATORS: True if haystack is equal to needle
N_('is'): '==',
# TRANSLATORS: True if haystack is not equal to needle
N_('is not'): '!==',
# TRANSLATORS: True if the specified tag is present (uses the NullField
# to compare to __null__)
N_('is set'): '<!==>',
# TRANSLATORS: True if the specified tag is not present (uses the NullField
# to compare to __null__)
N_('is not set'): '<==>',
# TRANSLATORS: True if haystack contains needle
N_('contains'): '=',
# TRANSLATORS: True if haystack does not contain needle
N_('does not contain'): '!=',
# TRANSLATORS: True if haystack contains whole word
N_('contains word'): 'w=',
# TRANSLATORS: True if haystack does not contain whole word
N_('does not contain word'): '!w=',
# TRANSLATORS: True if haystack matches regular expression
N_('regex'): '~',
# TRANSLATORS: True if haystack does not match regular expression
N_('not regex'): '!~',
# TRANSLATORS: Example: rating >= 5
N_('at least'): '>=',
# TRANSLATORS: Example: rating <= 3
N_('at most'): '<=',
# TRANSLATORS: Example: year < 1999
N_('before'): '<',
# TRANSLATORS: Example: year > 2002
N_('after'): '>',
# TRANSLATORS: Example: 1980 <= year <= 1987
N_('between'): '><',
N_('greater than'): '>',
N_('less than'): '<',
# TRANSLATORS: Example: track has been added in the last 2 days
N_('in the last'): '>=',
# TRANSLATORS: Example: track has not been added in the last 5 hours
N_('not in the last'): '<',
# TRANSLATORS: True if a track is contained in the specified playlist
N_('Track is in'): 'pin',
# TRANSLATORS: True if a track is not contained in the specified playlist
N_('Track not in'): '!pin',
}
# This table is a reverse lookup for the actual tag name from a display
# name.
# This gets populated below. Only add special tags/searches here.
_NMAP = {
N_('Rating'): '__rating', # special
N_('Playlist'): '__playlist', # not a real tag
}
_REV_NMAP = {}
# update the tables based on the globally stored tag list
def __update_maps():
from xl.metadata.tags import tag_data
for tag, data in tag_data.iteritems():
if data is None:
continue
# don't catch this KeyError -- if it fails, fix it!
criteria = _criteria_types[data.type]
if criteria is None:
continue
CRITERIA.append((data.name, criteria))
_NMAP[data.name] = tag
for k, v in _NMAP.iteritems():
if v in _REV_NMAP:
raise ValueError("_REV_NMAP Internal error: '%s', '%s'" % (k, v))
_REV_NMAP[v] = k
__update_maps()
class SmartPlaylistEditor(object):
@classmethod
def create(cls, collection, smart_manager, parent=None):
"""
Shows a dialog to create a new smart playlist
:param collection: Collection object
:param smart_manager: SmartPlaylistManager object
:param parent: Dialog parent
:returns: New smart playlist, or None
"""
dialog = FilterDialog(_('Add Smart Playlist'), parent, CRITERIA)
dialog.set_transient_for(parent)
cls._attach_sort_widgets(dialog, None, None)
return cls._run_edit_dialog(dialog, collection, smart_manager, parent)
@classmethod
def edit(cls, pl, collection, smart_manager, parent=None):
"""
Shows a dialog to edit a smart playlist
:param collection: Collection object
:param smart_manager: SmartPlaylistManager object
:param parent: Dialog parent
:returns: New smart playlist, or None
"""
if not isinstance(pl, playlist.SmartPlaylist):
return
from xl.metadata.tags import tag_data
params = pl.search_params
state = []
for param in params:
(field, op, value) = param
rev_field = _REV_NMAP[field]
# because there are duplicates in _TRANS, cannot create a reverse
# mapping. Instead, search in set of criteria defined for the type
data_type = field
data = tag_data.get(field)
if data is not None:
data_type = data.type
for ct in _criteria_types[data_type]:
rev_op = ct[0]
if _TRANS[rev_op] == op:
break
else:
dialogs.error(parent, "Invalid operand for %s, omitting" % rev_field)
continue
state.append(([rev_field, rev_op], value))
state.reverse()
dialog = FilterDialog(_('Edit Smart Playlist'), parent, CRITERIA)
dialog.set_transient_for(parent)
dialog.set_name(pl.name)
dialog.set_match_any(pl.get_or_match())
dialog.set_limit(pl.get_return_limit())
dialog.set_random(pl.get_random_sort())
dialog.set_state(state)
cls._attach_sort_widgets(dialog, *pl.get_sort_tags())
return cls._run_edit_dialog(
dialog, collection, smart_manager, parent, orig_pl=pl
)
@classmethod
def _run_edit_dialog(cls, dialog, collection, smart_manager, parent, orig_pl=None):
'''internal helper function'''
while True:
result = dialog.run()
dialog.hide()
if result != Gtk.ResponseType.ACCEPT:
return
name = dialog.get_name()
matchany = dialog.get_match_any()
limit = dialog.get_limit()
state = dialog.get_state()
random = dialog.get_random()
sort_tags = cls._get_sort_tags(dialog)
if not name:
dialogs.error(
parent, _("You did " "not enter a name for your playlist")
)
continue
if not orig_pl or name != orig_pl.name:
try:
pl = smart_manager.get_playlist(name)
dialogs.error(
parent, _("The " "playlist name you entered is already in use.")
)
continue
except ValueError:
pass # playlist didn't exist
pl = playlist.SmartPlaylist(name, collection)
pl.set_or_match(matchany)
pl.set_return_limit(limit)
pl.set_random_sort(random)
pl.set_sort_tags(*sort_tags)
for item in state:
(field, op) = item[0]
value = item[1]
pl.add_param(_NMAP[field], _TRANS[op], value)
if orig_pl:
smart_manager.remove_playlist(pl.name)
smart_manager.save_playlist(pl)
return pl
@classmethod
def _attach_sort_widgets(cls, dialog, tag, reverse):
# Add sort widgets
from xl.metadata.tags import tag_data
dialog.sort_enable = sort_enable = Gtk.CheckButton.new_with_label(_('Sort by:'))
def _on_sort_enable_changed(ck):
sort_tags.set_sensitive(ck.get_active())
sort_order.set_sensitive(ck.get_active())
sort_enable.connect('toggled', _on_sort_enable_changed)
dialog.sort_tags = sort_tags = Gtk.ComboBoxText.new()
sort_tags.set_wrap_width(5)
for k, v in sorted(tag_data.iteritems()):
if v:
sort_tags.append(k, v.translated_name)
dialog.sort_order = sort_order = Gtk.ComboBoxText.new()
sort_order.append("False", _("Ascending"))
sort_order.append("True", _("Descending"))
box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5)
box.pack_start(sort_enable, False, False, 0)
box.pack_start(sort_tags, False, False, 0)
box.pack_start(sort_order, False, False, 0)
box.show_all()
dialog.get_content_area().pack_start(box, False, False, 5)
# Set it up
if tag is None:
sort_enable.set_active(False)
sort_order.set_active_id("False")
else:
sort_enable.set_active(True)
sort_tags.set_active_id(tag)
sort_order.set_active_id(str(reverse))
_on_sort_enable_changed(sort_enable)
@classmethod
def _get_sort_tags(cls, dialog):
if not dialog.sort_enable.get_active():
return None, False
tag = dialog.sort_tags.get_active_id()
order = dialog.sort_order.get_active_id() == 'True'
return tag, order
|
akaihola/django-yui-loader
|
refs/heads/master
|
yui_loader/middleware.py
|
1
|
__doc__ = """
YUI_include -- YUI Loader as Django middleware
(c) Antti Kaihola 2008,2009 http://djangopeople.net/akaihola/
akaihol+django@ambitone.com
This server-side middleware implements some of the functionality in the Yahoo
User Interface Loader component. YUI JavaScript and CSS modules requirements
can be declared anywhere in the base, inherited or included templates, and the
resulting, optimized <script> and <link rel=stylesheet> tags are inserted at
the specified position of the resulting page.
Requirements may be specified in multiple locations. This is useful when zero
or more components are included in the HTML head section, and inherited and/or
included templates require possibly overlapping sets of YUI components in the
body. All tags are collected in the
head section, and duplicate tags are automatically eliminated.
The middleware understands component dependencies and ensures that resources
are loaded in the right order. It knows about built-in rollup files that ship
with YUI. By automatically using rolled-up files, the number of HTTP requests
is reduced.
The default syntax looks like HTML comments. Markup for the insertion point is
replaced with <script> and <link> tags:
<!-- YUI_init -->
Component requirements are indicated, possibly in multiple locations, with the
``YUI_include`` markup. It is removed from the resulting page by the
middleware. Example:
<!-- YUI_include fonts grids event dragdrop -->
Non-minified and compressed versions are requested, respectively, by:
<!-- YUI_version raw -->
<!-- YUI_version debug -->
Example:
<html><head>
<!-- YUI_init -->
<!-- YUI_include dom event -->
</head><body>
<!-- YUI_include element selector reset fonts base -->
</body></html>
Renders:
<html><head>
<link rel="stylesheet" type="text/css" href="http://yui.yahooapis.com/2.7.0/build/reset-fonts/reset-fonts.css" />
<link rel="stylesheet" type="text/css" href="http://yui.yahooapis.com/2.7.0/build/base/base-min.css" />
<script type="text/javascript" src="http://yui.yahooapis.com/2.7.0/build/yahoo-dom-event/yahoo-dom-event.js"></script>
<script type="text/javascript" src="http://yui.yahooapis.com/2.7.0/build/element/element-beta-min.js"></script>
<script type="text/javascript" src="http://yui.yahooapis.com/2.7.0/build/selector/selector-beta-min.js"></script>
</head><body>
</body></html>
The markup format can be customized with global Django settings. Example:
YUI_INCLUDE_PREFIX_RE = r'{!'
YUI_INCLUDE_SUFFIX_RE = r'!}'
would change markup to e.g. ``{! init !}`` and ``{! include dom event !}``.
The base URL is customized with the ``YUI_INCLUDE_BASE`` setting, e.g.:
YUI_INCLUDE_BASE = 'http://localhost:8000/yui/build/'
To remove the XHTML trailing slash from the <link> tag, use:
YUI_INCLUDE_CSS_TAG = '<link rel="stylesheet" type="text/css" href="%s">'
"""
import re
from django.conf import settings
from yui_loader.loader import YUILoader
PREFIX_RE = getattr(settings, 'YUI_INCLUDE_PREFIX_RE', '<!-- *YUI_')
SUFFIX_RE = getattr(settings, 'YUI_INCLUDE_SUFFIX_RE', ' *-->')
YUI_RE = re.compile(
r'%s(include|version) +(.*?)%s' % (PREFIX_RE, SUFFIX_RE))
YUI_ADDMODULE_RE = re.compile(
r'(?s)%saddModule\s*(\{\s*.*?\s*})\s*%s' % (PREFIX_RE, SUFFIX_RE))
YUI_INIT_RE = re.compile(
'%sinit%s' % (PREFIX_RE, SUFFIX_RE))
ACCEPTED_CONTENT_TYPES = ('text/html',
'text/xml',
'application/xhtml+xml',
'application/xml')
class YUIIncludeMiddleware(object):
def process_response(self, request, response):
if getattr(response, 'disable_yui_loader_middleware', False):
# TODO: support Django 1.5's StreamingHttpResponse too
return response
content_type = response['Content-Type'].split(';')[0]
if content_type not in ACCEPTED_CONTENT_TYPES:
return response
components = set()
loader = YUILoader()
def add_module(match):
loader.add_module(match.group(1))
return ''
content = YUI_ADDMODULE_RE.sub(add_module, response.content)
def collect(match):
cmd, data = match.groups()
if cmd == 'include':
components.update(data.split())
elif cmd == 'version':
loader.set_version(data)
else:
return '<!-- UNKNOWN COMMAND YUI_%s -->' % cmd
return ''
content = YUI_RE.sub(collect, content)
for component in components:
loader.add_component(component)
tags = loader.render()
if tags:
content, count = YUI_INIT_RE.subn(tags, content, 1)
if count != 1:
content += ('<p>%d YUI init tags found, '
'at least one expected</p>' % count)
response.content = YUI_INIT_RE.sub(
'<!-- WARNING: MULTIPLE YUI init STATEMENTS -->', content)
return response
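# --- Editor's note: hedged usage sketch, not part of the original module. ---
# process_response() above skips any response that carries the
# 'disable_yui_loader_middleware' attribute, so a view can opt out like this
# (the view and variable names are illustrative):
#
#     def raw_fragment_view(request):
#         response = HttpResponse(fragment_html)
#         response.disable_yui_loader_middleware = True
#         return response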
|
Alexander-P/Isca
|
refs/heads/master
|
src/extra/python/scripts/create_amip_sst_timeseries.py
|
1
|
# -*- coding: utf-8 -*-
import numpy as np
from calendar_calc import day_number_to_date
from netCDF4 import Dataset, date2num
import pdb
import create_timeseries as cts
import xarray as xar
from mpl_toolkits.basemap import shiftgrid
import matplotlib.pyplot as plt
def add_sst_anomaly(sst_in, anomaly_type=None):
if anomaly_type=='el_nino':
el_nino_dataset_location = '/scratch/sit204/HadISST/hadisst_el_nino_3p4_analysis_with_rolling_mean.nc'
dataset_el_nino = xar.open_dataset(el_nino_dataset_location)
start_month_idx = 472
end_month_idx = 483
sst_anom_orig_order = dataset_el_nino['t_surf_anom'][start_month_idx:(end_month_idx+1),...]
print(('Selecting months between ' + str(dataset_el_nino.time.values[start_month_idx]) + ' to ' + str(dataset_el_nino.time.values[end_month_idx])))
lat_range_keep = [-30., 15.]
lon_range_keep = [-180., -70.]
taper_length = 15.
masked_sst_anom_orig_order = apply_lat_lon_mask(sst_anom_orig_order, lat_range_keep, lon_range_keep, taper_length)
masked_sst_anom_orig_order = masked_sst_anom_orig_order.fillna(0.)
lon_shift = np.around(dataset_el_nino.lon.mean().values/180.)*180.
if lon_shift==0.:
lon_shift_practical = lon_shift+0.001
#Added a little extra so that it doesn't start the grid at -0.5, but rather starts it at +0.5, as in the bcs data.
sst_anom_orig_order_shift_lon,lons = shiftgrid(lon_shift_practical,masked_sst_anom_orig_order,dataset_el_nino.lon.values)
else:
sst_anom_orig_order_shift_lon = masked_sst_anom_orig_order
sst_anom_all_shifted = sst_anom_orig_order_shift_lon[:,::-1,:]
print(' reordering months to line up with input file')
start_month = dataset_el_nino.months.values[start_month_idx]
end_month = dataset_el_nino.months.values[end_month_idx]
sst_anom = np.zeros_like(sst_anom_orig_order)
for month_tick in range(12):
orig_order_idx = np.mod(month_tick - (start_month-1), 12)
sst_anom[month_tick, ...] = sst_anom_all_shifted[orig_order_idx]
sst_with_anomaly = sst_in + sst_anom
return sst_with_anomaly, lons
def apply_lat_lon_mask( unmasked_input, lat_range, lon_range_in, taper_length, power = 5):
width = np.abs(lon_range_in[1]-lon_range_in[0])
central_point = (lon_range_in[1]+lon_range_in[0])/2.
lon_range = [-width/2., width/2.]
zeros_mask = xar.DataArray(np.zeros_like(unmasked_input.values), coords=unmasked_input.coords, dims=unmasked_input.dims)
untapered_mask = xar.DataArray(np.zeros_like(unmasked_input.values), coords=unmasked_input.coords, dims=unmasked_input.dims)
lat_array = unmasked_input.lat.values
lon_array = unmasked_input.lon.values
for lat_tick in range(len(lat_array)):
lat_value = lat_array[lat_tick]
for lon_tick in range(len(lon_array)):
lon_value = lon_array[lon_tick]
if (lat_value < lat_range[1] and lat_value > lat_range[0]) and (lon_value < lon_range[1] and lon_value > lon_range[0]):
zeros_mask[:,lat_tick, lon_tick] = 1.
untapered_mask[:,lat_tick, lon_tick] = 1.
#All of those points inside the un-tapered box have the mask set to 1.
elif (lat_value < lat_range[1]+taper_length and lat_value > lat_range[0]-taper_length) and (lon_value < lon_range[1]+taper_length and lon_value > lon_range[0]-taper_length) and not (lat_value < lat_range[1] and lat_value > lat_range[0]):
min_distance = np.min([np.abs(lat_value-lat_range[1]), np.abs(lat_value - lat_range[0])])
zeros_mask[:,lat_tick, lon_tick] = (1. -((min_distance)/(taper_length)))**power
#These are the points that are within the larger tapered box, but in a latitude band outside of the untapered box.
if not (lon_value < lon_range[1] and lon_value > lon_range[0]) and not (lat_value < lat_range[1] and lat_value > lat_range[0]) :
min_distance = np.min([np.abs(lon_value-lon_range[1]), np.abs(lon_value - lon_range[0])])
zeros_mask[:,lat_tick, lon_tick] = zeros_mask[:,lat_tick, lon_tick] * (1. -((min_distance)/(taper_length)))**power
#These are the corners - so a more longitudinally restricted version of the above
elif (lon_value < lon_range[1]+taper_length and lon_value > lon_range[0]-taper_length) and (lat_value < lat_range[1] and lat_value > lat_range[0]):
min_distance = np.min([np.abs(lon_value-lon_range[1]), np.abs(lon_value - lon_range[0])])
zeros_mask[:,lat_tick, lon_tick] = (1. -((min_distance)/(taper_length)))**power
#The final set of points is within the larger box, but within the latitude range of the original box.
#Added a little extra so that it doesn't start the grid at -0.5, but rather starts it at +0.5, as in the bcs data.
zeros_mask_shifted,lons = shiftgrid(-180.-central_point+0.5, zeros_mask, zeros_mask.lon.values)
untapered_mask_shifted ,lons = shiftgrid(-180.-central_point+0.5, untapered_mask, untapered_mask.lon.values)
final_mask = xar.DataArray(zeros_mask_shifted, coords=unmasked_input.coords, dims=unmasked_input.dims)
final_untapered_mask = xar.DataArray(untapered_mask_shifted, coords=unmasked_input.coords, dims=unmasked_input.dims)
masked_sst = unmasked_input * final_mask
return masked_sst
def main():
base_directory='/scratch/sit204/sst_amip_files/'
amip_data_version='amip_data_version_1_1_0' # 'amip_data_version_1_1_0' or 'amip_data_version_1_0_0'
output_name_list ={'tosbcs':'sst','siconc':'siconc'}
#Note that we are using the bcs (boundary conditions) input4MIPs files, as instructed.
# The theory is that by using the bcs files (which are mid-month values) the time-average
# of the interpolated bcs files should be equal to the time-average data provided in 'tos'
# files, not the 'tosbcs'. See http://www-pcmdi.llnl.gov/projects/amip/AMIP2EXPDSN/BCS/amip2bcs.php
# and http://www-pcmdi.llnl.gov/projects/amip/AMIP2EXPDSN/BCS/amipbc_dwnld_files/360x180/v1.0.0/nc/readme_nc
add_anomaly = False
# anomaly_type='el_nino'
months_to_include='all'
months_to_include='DJF'
for variable_name in list(output_name_list.keys()):
if amip_data_version=='amip_data_version_1_0_0':
nfiles=50
folder_name='/1950_1999/'
filename_prefix='amipbc_sst_360x180_19'
sst_all=np.zeros((nfiles,12,180,360))
do_annual_mean=True
elif amip_data_version=='amip_data_version_1_1_0':
nfiles=1
folder_name=''
filename_prefix=variable_name+'_input4MIPs_SSTsAndSeaIce_CMIP_PCMDI-AMIP-1-1-0_gs1x1_187001-201512'
do_annual_mean=False
for file_tick in range(nfiles):
if nfiles!=1:
filename=filename_prefix+str(file_tick+50)
else:
filename=filename_prefix
resolution_file = Dataset(base_directory+amip_data_version+'/'+folder_name+'/'+filename+'.nc', 'r')
try:
lons = resolution_file.variables['longitude'][:]
lats = resolution_file.variables['latitude'][:]
except KeyError:
lons = resolution_file.variables['lon'][:]
lats = resolution_file.variables['lat'][:]
sst_in = resolution_file.variables[variable_name][:]
try:
sst_all[file_tick,:,:,:]=sst_in
except NameError:
sst_all=sst_in
except IndexError:
sst_all=sst_in
try:
lonbs = resolution_file.variables['bounds_longitude'][:]
latbs = resolution_file.variables['bounds_latitude'][:]
except KeyError:
lonbs = resolution_file.variables['lon_bnds'][:]
latbs = resolution_file.variables['lat_bnds'][:]
nlon=lons.shape[0]
nlat=lats.shape[0]
nlonb=lonbs.shape[0]
nlatb=latbs.shape[0]
lonbs_adjusted=np.zeros(nlonb+1)
latbs_adjusted=np.zeros(nlatb+1)
lonbs_adjusted[0:nlonb]=lonbs[:,0]
lonbs_adjusted[nlonb]=lonbs[-1,1]
latbs_adjusted[0:nlatb]=latbs[:,0]
latbs_adjusted[nlatb]=latbs[-1,1]
day_number = resolution_file.variables['time'][:]
time_arr = day_number_to_date(day_number, calendar_type = 'gregorian', units_in = 'days since 1870-1-1')
time_arr_adj=np.arange(15,360,30)
annual_mean_name=''
if len(sst_all.shape)==4:
sst_in=np.mean(sst_all,axis=0)
else:
sst_in=np.zeros((12,180,360))
if months_to_include=='all':
for month_tick in np.arange(1,13,1):
month_idx = np.where(time_arr.month==month_tick)[0]
sst_in[month_tick-1,:,:]=np.mean(sst_all[month_idx,:,:],axis=0)
elif months_to_include=='DJF':
djf_idx = np.where(np.logical_or(np.logical_or(time_arr.month==1, time_arr.month==2), time_arr.month==12))
djf_mean = np.mean(sst_all[djf_idx[0],...], axis=0)
for month_tick in np.arange(1,13,1):
sst_in[month_tick-1,...]=djf_mean
annual_mean_name='_djf'
if do_annual_mean:
sst_in_am=np.mean(sst_in,axis=0)
sst_in=np.zeros((12,180,360))
for month_tick in np.arange(1,13,1):
sst_in[month_tick-1,:,:]=sst_in_am
annual_mean_name='_am'
if add_anomaly and variable_name=='tosbcs':
sst_in, shifted_lons = add_sst_anomaly(sst_in, anomaly_type)
anom_name = '_'+anomaly_type
else:
anom_name = ''
p_full=None
p_half=None
npfull=None
nphalf=None
#Find grid and time numbers
ntime=time_arr.day.shape[0]
nlonb=lonbs_adjusted.shape[0]
nlatb=latbs_adjusted.shape[0]
#Output it to a netcdf file.
variable_name=output_name_list[variable_name]+annual_mean_name+'_clim_amip'+anom_name
file_name=variable_name+'_'+amip_data_version+'.nc'
number_dict={}
number_dict['nlat']=nlat
number_dict['nlon']=nlon
number_dict['nlatb']=nlatb
number_dict['nlonb']=nlonb
number_dict['npfull']=npfull
number_dict['nphalf']=nphalf
number_dict['ntime']=ntime
time_units='days since 0000-01-01 00:00:00.0'
cts.output_to_file(sst_in,lats,lons,latbs_adjusted,lonbs_adjusted,p_full,p_half,time_arr_adj,time_units,file_name,variable_name,number_dict)
if __name__=="__main__":
main()
|
gnmiller/craig-bot
|
refs/heads/master
|
craig-bot/lib/python3.6/site-packages/urllib3/util/url.py
|
8
|
from __future__ import absolute_import
import re
from collections import namedtuple
from ..exceptions import LocationParseError
from ..packages import six, rfc3986
from ..packages.rfc3986.exceptions import RFC3986Exception, ValidationError
from ..packages.rfc3986.validators import Validator
from ..packages.rfc3986 import abnf_regexp, normalizers, compat, misc
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)
# Regex for detecting URLs with schemes. RFC 3986 Section 3.1
SCHEME_REGEX = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+\-]*:|/)")
PATH_CHARS = abnf_regexp.UNRESERVED_CHARS_SET | abnf_regexp.SUB_DELIMITERS_SET | {':', '@', '/'}
QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {'?'}
class Url(namedtuple('Url', url_attrs)):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
if scheme is not None:
scheme = scheme.lower()
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = u''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + u'://'
if auth is not None:
url += auth + u'@'
if host is not None:
url += host
if port is not None:
url += u':' + str(port)
if path is not None:
url += path
if query is not None:
url += u'?' + query
if fragment is not None:
url += u'#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
.. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx + 1:], min_delim
def _encode_invalid_chars(component, allowed_chars, encoding='utf-8'):
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component. Based on
rfc3986.normalizers.encode_component()
"""
if component is None:
return component
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
percent_encodings = len(normalizers.PERCENT_MATCHER.findall(
compat.to_str(component, encoding)))
uri_bytes = component.encode('utf-8', 'surrogatepass')
is_percent_encoded = percent_encodings == uri_bytes.count(b'%')
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring on both Python 2 & 3
byte = uri_bytes[i:i+1]
byte_ord = ord(byte)
if ((is_percent_encoded and byte == b'%')
or (byte_ord < 128 and byte.decode() in allowed_chars)):
encoded_component.extend(byte)
continue
encoded_component.extend('%{0:02x}'.format(byte_ord).encode().upper())
return encoded_component.decode(encoding)
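# --- Editor's note: hedged illustration, not part of the original module. ---
# _encode_invalid_chars() keeps existing percent-escapes but encodes anything
# outside the allowed set; an example derived from the logic above:
#
#     _encode_invalid_chars('/path with space/%20ok', PATH_CHARS)
#     # -> '/path%20with%20space/%20ok'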
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
This parser is RFC 3986 compliant.
:param str url: URL to parse into a :class:`.Url` namedtuple.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
if not url:
# Empty
return Url()
is_string = not isinstance(url, six.binary_type)
# RFC 3986 doesn't like URLs that have a host but don't start
# with a scheme and we support URLs like that so we need to
# detect that problem and add an empty scheme indication.
# We don't get hurt on path-only URLs here as it's stripped
# off and given an empty scheme anyways.
if not SCHEME_REGEX.search(url):
url = "//" + url
def idna_encode(name):
if name and any([ord(x) > 128 for x in name]):
try:
import idna
except ImportError:
raise LocationParseError("Unable to parse URL without the 'idna' module")
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
raise LocationParseError(u"Name '%s' is not a valid IDNA label" % name)
return name
try:
split_iri = misc.IRI_MATCHER.match(compat.to_str(url)).groupdict()
iri_ref = rfc3986.IRIReference(
split_iri['scheme'], split_iri['authority'],
_encode_invalid_chars(split_iri['path'], PATH_CHARS),
_encode_invalid_chars(split_iri['query'], QUERY_CHARS),
_encode_invalid_chars(split_iri['fragment'], FRAGMENT_CHARS)
)
has_authority = iri_ref.authority is not None
uri_ref = iri_ref.encode(idna_encoder=idna_encode)
except (ValueError, RFC3986Exception):
return six.raise_from(LocationParseError(url), None)
# rfc3986 strips the authority if it's invalid
if has_authority and uri_ref.authority is None:
raise LocationParseError(url)
# Only normalize schemes we understand to not break http+unix
# or other schemes that don't follow RFC 3986.
if uri_ref.scheme is None or uri_ref.scheme.lower() in NORMALIZABLE_SCHEMES:
uri_ref = uri_ref.normalize()
# Validate all URIReference components and ensure that all
# components that were set before are still set after
# normalization has completed.
validator = Validator()
try:
validator.check_validity_of(
*validator.COMPONENT_NAMES
).validate(uri_ref)
except ValidationError:
return six.raise_from(LocationParseError(url), None)
# For the sake of backwards compatibility we put empty
# string values for path if there are any defined values
# beyond the path in the URL.
# TODO: Remove this when we break backwards compatibility.
path = uri_ref.path
if not path:
if (uri_ref.query is not None
or uri_ref.fragment is not None):
path = ""
else:
path = None
# Ensure that each part of the URL is a `str` for
# backwards compatibility.
def to_input_type(x):
if x is None:
return None
elif not is_string and not isinstance(x, six.binary_type):
return x.encode('utf-8')
return x
return Url(
scheme=to_input_type(uri_ref.scheme),
auth=to_input_type(uri_ref.userinfo),
host=to_input_type(uri_ref.host),
port=int(uri_ref.port) if uri_ref.port is not None else None,
path=to_input_type(path),
query=to_input_type(uri_ref.query),
fragment=to_input_type(uri_ref.fragment)
)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
|
ldts/zephyr
|
refs/heads/evl-latency
|
scripts/kconfig/kconfig.py
|
1
|
#!/usr/bin/env python3
# Modified from: https://github.com/ulfalizer/Kconfiglib/blob/master/examples/merge_config.py
import argparse
import os
import sys
import textwrap
from kconfiglib import Kconfig, BOOL, TRISTATE, TRI_TO_STR
# Warnings that won't be turned into errors (but that will still be printed),
# identified by a substring of the warning. The warning texts from Kconfiglib
# are guaranteed to not change.
WARNING_WHITELIST = (
# Warning generated when a symbol with unsatisfied dependencies is being
# selected. These should be investigated, but whitelist them for now.
"y-selected",
)
def fatal(warning):
# Returns True if 'warning' is not whitelisted and should be turned into an
# error
return not any(wl_warning in warning for wl_warning in WARNING_WHITELIST)
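# --- Editor's note: hedged illustration, not part of the original script; the warning texts are made up. ---
# fatal() does a plain substring match against WARNING_WHITELIST, so e.g.:
#
#     fatal("warning: FOO (defined at ...) was y-selected ...")  # -> False, stays a warning
#     fatal("warning: undefined symbol BAR ...")                 # -> True, becomes an error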
def main():
args = parse_args()
print("Parsing Kconfig tree in " + args.kconfig_root)
kconf = Kconfig(args.kconfig_root, warn_to_stderr=False)
# prj.conf may override settings from the board configuration, so disable
# warnings about symbols being assigned more than once
kconf.disable_override_warnings()
kconf.disable_redun_warnings()
# Warn for assignments to undefined symbols
kconf.enable_undef_warnings()
for i, config in enumerate(args.conf_fragments):
print(("Loading {} as base" if i == 0 else "Merging {}")
.format(config))
# replace=False creates a merged configuration
kconf.load_config(config, replace=False)
# Print warnings for symbols whose actual value doesn't match the assigned
# value
for sym in kconf.unique_defined_syms:
# Was the symbol assigned to? Choice symbols are checked separately.
if sym.user_value is not None and not sym.choice:
verify_assigned_sym_value(sym)
# Print warnings for choices whose actual selection doesn't match the user
# selection
for choice in kconf.unique_choices:
if choice.user_selection:
verify_assigned_choice_value(choice)
# Hack: Force all symbols to be evaluated, to catch warnings generated
# during evaluation. Wait till the end to write the actual output files, so
# that we don't generate any output if there are warnings-turned-errors.
#
# Kconfiglib caches calculated symbol values internally, so this is still
# fast.
kconf.write_config(os.devnull)
# Print warnings ourselves so that we can put a blank line between them for
# readability. We could roll this into the loop below, but it's nice to
# always print all warnings, even if one of them turns out to be fatal.
for warning in kconf.warnings:
print("\n" + warning, file=sys.stderr)
# Turn all warnings except for explicitly whitelisted ones into errors. In
# particular, this will turn assignments to undefined Kconfig variables
# into errors.
#
# A warning is generated by this script whenever a symbol gets a different
# value than the one it was assigned. Keep that one as just a warning for
# now as well.
for warning in kconf.warnings:
if fatal(warning):
sys.exit("\n" + textwrap.fill(
"Error: Aborting due to non-whitelisted Kconfig "
"warning '{}'.\nNote: If this warning doesn't point "
"to an actual problem, you can add it to the "
"whitelist at the top of {}."
.format(warning, sys.argv[0]),
100) + "\n")
# Write the merged configuration and the C header
kconf.write_config(args.dotconfig)
print("Configuration written to '{}'".format(args.dotconfig))
kconf.write_autoconf(args.autoconf)
# Write the list of processed Kconfig sources to a file
write_kconfig_filenames(kconf.kconfig_filenames, kconf.srctree, args.sources)
# Message printed when a promptless symbol is assigned (and doesn't get the
# assigned value)
PROMPTLESS_HINT = """
This symbol has no prompt, meaning assignments in configuration files have no
effect on it. It can only be set indirectly, via Kconfig defaults (e.g. in a
Kconfig.defconfig file) or through being 'select'ed or 'imply'd (note: try to
avoid Kconfig 'select's except for trivial promptless "helper" symbols without
dependencies, as it ignores dependencies and forces symbols on)."""
# Message about where to look up symbol information
SYM_INFO_HINT = """
You can check symbol information (including dependencies) in the 'menuconfig'
interface (see the Application Development Primer section of the manual), or in
the Kconfig reference at
http://docs.zephyrproject.org/latest/reference/kconfig/CONFIG_{}.html (which is
updated regularly from the master branch). See the 'Setting configuration
values' section of the Board Porting Guide as well."""
PROMPTLESS_HINT_EXTRA = """
It covers Kconfig.defconfig files."""
def verify_assigned_sym_value(sym):
# Verifies that the value assigned to 'sym' "took" (matches the value the
# symbol actually got), printing a warning otherwise
# Tristate values are represented as 0, 1, 2. Having them as
# "n", "m", "y" is more convenient here, so convert.
if sym.type in (BOOL, TRISTATE):
user_value = TRI_TO_STR[sym.user_value]
else:
user_value = sym.user_value
if user_value != sym.str_value:
msg = "warning: {} was assigned the value '{}' but got the " \
"value '{}'." \
.format(name_and_loc(sym), user_value, sym.str_value)
if promptless(sym): msg += PROMPTLESS_HINT
msg += SYM_INFO_HINT.format(sym.name)
if promptless(sym): msg += PROMPTLESS_HINT_EXTRA
# Use a large fill() width to try to avoid linebreaks in the symbol
# reference link
print("\n" + textwrap.fill(msg, 100), file=sys.stderr)
def verify_assigned_choice_value(choice):
# Verifies that the choice symbol that was selected (by setting it to y)
# ended up as the selection, printing a warning otherwise.
#
# We check choice symbols separately to avoid warnings when two different
# choice symbols within the same choice are set to y. This might happen if
# a choice selection from a board defconfig is overridden in a prj.conf, for
# example. The last choice symbol set to y becomes the selection (and all
# other choice symbols get the value n).
#
# Without special-casing choices, we'd detect that the first symbol set to
# y ended up as n, and print a spurious warning.
if choice.user_selection is not choice.selection:
msg = "warning: the choice symbol {} was selected (set =y), but {} " \
"ended up as the choice selection. {}" \
.format(name_and_loc(choice.user_selection),
name_and_loc(choice.selection) if choice.selection
else "no symbol",
SYM_INFO_HINT.format(choice.user_selection.name))
print("\n" + textwrap.fill(msg, 100), file=sys.stderr)
def name_and_loc(sym):
# Helper for printing the name and Kconfig file location(s) for a symbol
if not sym.nodes:
return sym.name + " (undefined)"
return "{} (defined at {})".format(
sym.name,
", ".join("{}:{}".format(node.filename, node.linenr)
for node in sym.nodes))
def promptless(sym):
# Returns True if 'sym' has no prompt. Since the symbol might be defined in
# multiple locations, we need to check all locations.
return not any(node.prompt for node in sym.nodes)
def write_kconfig_filenames(paths, root_path, output_file_path):
# 'paths' is a list of paths. The list has duplicates and the
# paths are either absolute or relative to 'root_path'.
# We need to write this list, in a format that CMake can easily
# parse, to the output file at 'output_file_path'.
# The written list should also have absolute paths instead of
# relative paths, and it should not have duplicates.
# Remove duplicates
paths_uniq = set(paths)
with open(output_file_path, 'w') as out:
# sort to be deterministic
for path in sorted(paths_uniq):
# Change from relative to absolute path (do nothing for
# absolute paths)
abs_path = os.path.join(root_path, path)
# Assert that the file exists, since it was sourced, it
# must surely also exist.
assert os.path.isfile(abs_path), "Internal error"
out.write("{}\n".format(abs_path))
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("kconfig_root")
parser.add_argument("dotconfig")
parser.add_argument("autoconf")
parser.add_argument("sources")
parser.add_argument("conf_fragments", metavar='conf', type=str, nargs='+')
return parser.parse_args()
if __name__ == "__main__":
main()
|
AthelasPeru/laborapp
|
refs/heads/master
|
app/models/relationships.py
|
1
|
from app.models import db
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('roles.id')))
user_skills = db.Table("user_skills",
db.Column(
"user_id", db.Integer, db.ForeignKey("users.id")),
db.Column(
"skills_id", db.Integer, db.ForeignKey("skills.id"))
)
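# --- Editor's note: hedged usage sketch; the model classes below are assumptions, not part of this file. ---
# Association tables like these are normally wired into models via `secondary=`, e.g.:
#
#     class User(db.Model):
#         __tablename__ = 'users'
#         id = db.Column(db.Integer, primary_key=True)
#         roles = db.relationship('Role', secondary=roles_users, backref='users')
#         skills = db.relationship('Skill', secondary=user_skills, backref='users')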
|
arenadata/ambari
|
refs/heads/branch-adh-1.6
|
ambari-server/src/main/resources/stacks/ADH/1.0/services/HDFS/package/scripts/params_windows.py
|
2
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
#Used in subsequent imports from params
from status_params import *
config = Script.get_config()
hadoop_conf_dir = None
hbase_conf_dir = None
hadoop_home = None
try:
hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
hadoop_home = os.environ["HADOOP_HOME"]
except:
pass
#directories & files
dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
#decomission
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
# HDFS High Availability properties
dfs_ha_enabled = False
dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
namenode_id = None
namenode_rpc = None
hostname = config["hostname"]
if dfs_ha_namenode_ids:
dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
if dfs_ha_namenode_ids_array_len > 1:
dfs_ha_enabled = True
if dfs_ha_enabled:
for nn_id in dfs_ha_namemodes_ids_list:
nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
if hostname in nn_host:
namenode_id = nn_id
namenode_rpc = nn_host
hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
hdfs_user = hadoop_user
grep_exe = "findstr"
name_node_params = default("/commandParams/namenode", None)
service_map = {
"datanode" : datanode_win_service_name,
"journalnode" : journalnode_win_service_name,
"namenode" : namenode_win_service_name,
"secondarynamenode" : snamenode_win_service_name,
"zkfc_slave": zkfc_win_service_name
}
|
sergiopasra/numina
|
refs/heads/master
|
numina/array/nirproc.py
|
3
|
#
# Copyright 2008-2021 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
import math
import numpy
def fowler_array(fowlerdata, ti=0.0, ts=0.0, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, normalize=False):
"""Loop over the first axis applying Fowler processing.
*fowlerdata* is assumed to be a 3D numpy.ndarray containing the
result of an nIR observation in Fowler mode (Fowler and Gatley 1991).
The shape of the array must be of the form 2N_p x M x N, with N_p being
the number of pairs in Fowler mode.
The output signal is just the mean value of the differences between the
last N_p values (S_i) and the first N_p values (R_i).
.. math::
S_F = \\frac{1}{N_p}\\sum\\limits_{i=0}^{N_p-1} S_i - R_i
If the source has a radiance F, then the measured signal is equivalent
to:
.. math::
S_F = F T_I - F T_S (N_p -1) = F T_E
being T_I the integration time (*ti*), the time since the first
productive read to the last productive read for a given pixel and T_S the
time between samples (*ts*). T_E is the time between correlated reads
:math:`T_E = T_I - T_S (N_p - 1)`.
The variance of the signal is the sum of two terms, one for the readout
noise:
.. math::
\\mathrm{var}(S_{F1}) = \\frac{2\\sigma_R^2}{N_p}
and other for the photon noise:
.. math::
\\mathrm{var}(S_{F2}) = F T_E - F T_S \\frac{1}{3}(N_p-\\frac{1}{N_p})
= F T_I - F T_S (\\frac{4}{3} N_p -1 - \\frac{1}{3N_p})
:param fowlerdata: Convertible to a 3D numpy.ndarray with first axis even
:param ti: Integration time.
:param ts: Time between samples.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of (signal, variance of the signal, number of pixels used,
and badpixel mask).
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if ti < 0:
raise ValueError("invalid parameter, ti < 0.0")
if ts < 0:
raise ValueError("invalid parameter, ts < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
fowlerdata = numpy.asarray(fowlerdata)
if fowlerdata.ndim != 3:
raise ValueError('fowlerdata must be 3D')
npairs = fowlerdata.shape[0] // 2
if 2 * npairs != fowlerdata.shape[0]:
raise ValueError('axis-0 in fowlerdata must be even')
# change byteorder
ndtype = fowlerdata.dtype.newbyteorder('=')
fowlerdata = numpy.asarray(fowlerdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(fowlerdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (fowlerdata.shape[1], fowlerdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
raise ValueError('shape of badpixels is not '
'compatible with shape of fowlerdata')
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_fowler_intl(
fowlerdata, ti, ts, gain, ron,
badpixels, saturation, blank,
result, var, npix, mask
)
return result, var, npix, mask
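# --- Editor's note: hedged usage sketch, not part of the original module; values are illustrative. ---
# With N_p = 2 pairs the cube's first axis has length 4 (two resets followed by
# two reads per pixel):
#
#     cube = numpy.zeros((4, 64, 64))
#     cube[2:] = 100.0  # reads sit 100 counts above the resets
#     signal, var, npix, mask = fowler_array(cube, ti=10.0, ts=1.0)
#     # per the S_F formula above, signal should come out near 100 everywhere,
#     # and var, npix and mask all have shape (64, 64)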
def ramp_array(rampdata, ti, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, nsig=None, normalize=False):
"""Loop over the first axis applying ramp processing.
*rampdata* is assumed to be a 3D numpy.ndarray containing the
result of an nIR observation in follow-up-the-ramp mode.
The shape of the array must be of the form N_s x M x N, with N_s being
the number of samples.
:param rampdata: Convertible to a 3D numpy.ndarray
:param ti: Integration time.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of signal, variance of the signal, number of pixels used
and badpixel mask.
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if ti <= 0:
raise ValueError("invalid parameter, ti <= 0.0")
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
rampdata = numpy.asarray(rampdata)
if rampdata.ndim != 3:
raise ValueError('rampdata must be 3D')
# change byteorder
ndtype = rampdata.dtype.newbyteorder('=')
rampdata = numpy.asarray(rampdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(rampdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (rampdata.shape[1], rampdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
msg = 'shape of badpixels is not compatible with shape of rampdata'
raise ValueError(msg)
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_ramp_intl(
rampdata, ti, gain, ron, badpixels,
saturation, blank, result, var, npix, mask
)
return result, var, npix, mask
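# --- Hedged usage sketch (not part of the original module) ---
# A synthetic linear ramp of 6 samples on a 10x10 detector; the parameter
# names follow the docstring above and the values are illustrative only.
#   nsamples = 6
#   steps = numpy.arange(nsamples, dtype='float64')[:, None, None]
#   ramp = 50.0 * steps * numpy.ones((1, 10, 10))
#   signal, variance, npix, mask = ramp_array(ramp, ti=10.0, gain=1.0, ron=3.0)
# Each pixel climbs by 250 counts over the integration time ti.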
# This is not used...
# Old code used to detect cosmic rays in the ramp
def _ramp(data, saturation, dt, gain, ron, nsig):
nsdata = data[data < saturation]
# Finding glitches in the pixels
intervals, glitches = _rglitches(nsdata, gain=gain, ron=ron, nsig=nsig)
vals = numpy.asarray([_slope(nsdata[intls], dt=dt, gain=gain, ron=ron)
for intls in intervals if len(nsdata[intls]) >= 2])
weights = (1.0 / vals[:, 1])
average = numpy.average(vals[:, 0], weights=weights)
variance = 1.0 / weights.sum()
return average, variance, vals[:, 2].sum(), glitches
def _rglitches(nsdata, gain, ron, nsig):
diffs = nsdata[1:] - nsdata[:-1]
psmedian = numpy.median(diffs)
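# sigma below combines the shot noise of the median count difference
# (|psmedian| / gain, a variance in counts**2) with the read noise of the
# two reads entering each difference (2 * ron**2).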
sigma = math.sqrt(abs(psmedian / gain) + 2 * ron * ron)
start = 0
intervals = []
glitches = []
for idx, diff in enumerate(diffs):
if not (psmedian - nsig * sigma < diff < psmedian + nsig * sigma):
intervals.append(slice(start, idx + 1))
start = idx + 1
glitches.append(start)
intervals.append(slice(start, None))
return intervals, glitches
def _slope(nsdata, dt, gain, ron):
if len(nsdata) < 2:
raise ValueError('Two points needed to compute the slope')
nn = len(nsdata)
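# Ordinary least-squares slope: ww below are the sample indices centred on
# their mean, delt equals dt * sum(ww**2), and final is the fitted slope
# in counts per unit time.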
delt = dt * nn * (nn + 1) * (nn - 1) / 12
ww = numpy.arange(1, nn + 1) - (nn + 1) / 2
final = (ww * nsdata).sum() / delt
# Readout limited case
delt2 = dt * delt
var1 = (ron / gain)**2 / delt2
# Photon limiting case
var2 = (6 * final * (nn * nn + 1)) / (5 * nn * dt * (nn * nn - 1) * gain)
variance = var1 + var2
return final, variance, nn
|
jamesbulpin/xcp-xen-4.1
|
refs/heads/master
|
tools/python/logging/logging-0.4.9.2/test/log_test3.py
|
42
|
#!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests new fileConfig (not yet a complete test).
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.config
def doLog(logger):
logger.debug("Debug")
logger.info("Info")
logger.warning("Warning")
logger.error("Error")
logger.critical("Critical")
def main():
logging.config.fileConfig("log_test3.ini")
logger = logging.getLogger(None)
print "---------------------------------------------------"
print "-- Logging to root; messages appear on console only"
print "---------------------------------------------------"
doLog(logger)
print "----------------------------------------------------------------------"
print "-- Logging to log02; messages appear on console and in file python.log"
print "----------------------------------------------------------------------"
logger = logging.getLogger("log02")
doLog(logger)
print "--------------------------------------------------------------------------"
print "-- Logging to log02.log03; messages appear on console, in file python.log,"
print "-- and at logrecv.py tcp (if running. <= DEBUG messages will not appear)."
print "--------------------------------------------------------------------------"
logger = logging.getLogger("log02.log03")
doLog(logger)
print "-----------------------------------------------------------------------"
print "-- Logging to log02.log03.log04; messages appear only at logrecv.py udp"
print "-- (if running. <= INFO messages will not appear)."
print "-----------------------------------------------------------------------"
logger = logging.getLogger("log02.log03.log04")
doLog(logger)
print "--------------------------------------------------------------------"
print "-- Logging to log02.log03.log04.log05.log06; messages appear at"
print "-- logrecv.py udp (if running. < CRITICAL messages will not appear)."
print "--------------------------------------------------------------------"
logger = logging.getLogger("log02.log03.log04.log05.log06")
doLog(logger)
print "-- All done."
logging.shutdown()
if __name__ == "__main__":
main()
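# --- Hedged sketch (not part of the original test) ---
# log_test3.ini itself is not included here; a minimal config in the
# stdlib fileConfig style (the 0.4.9.2 format may differ in detail)
# declares the loggers, handlers and formatters used above, e.g.:
#   [loggers]
#   keys=root,log02
#   [handlers]
#   keys=console
#   [formatters]
#   keys=simple
#   [logger_root]
#   level=DEBUG
#   handlers=console
#   [logger_log02]
#   level=DEBUG
#   handlers=console
#   qualname=log02
#   propagate=0
#   [handler_console]
#   class=StreamHandler
#   level=DEBUG
#   formatter=simple
#   args=(sys.stdout,)
#   [formatter_simple]
#   format=%(asctime)s %(levelname)s %(message)s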
|
cherez/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/dump.py
|
120
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class DumpIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'
_TEST = {
'url': 'http://www.dump.com/oneus/',
'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
'info_dict': {
'id': 'oneus',
'ext': 'flv',
'title': "He's one of us.",
'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
}
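# --- Hedged usage sketch (not part of the original extractor) ---
# Extractors are normally driven through the public YoutubeDL entry point;
# the URL below is the one from the _TEST block above.
#   from youtube_dl import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info('http://www.dump.com/oneus/', download=False)
#       print(info.get('title'))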
|
derekjchow/models
|
refs/heads/master
|
research/tcn/eval.py
|
5
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Calculates running validation of TCN models (and baseline comparisons)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from estimators.get_estimator import get_estimator
from utils import util
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_string(
'config_paths', '',
"""
Paths to YAML configuration files defining FLAG values. Multiple files
can be separated by the `#` symbol. Files are merged recursively. Setting
a key in these files is equivalent to setting the FLAG value with
the same name.
""")
tf.flags.DEFINE_string(
'model_params', '{}', 'YAML configuration string for the model parameters.')
tf.app.flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master to use')
tf.app.flags.DEFINE_string(
'logdir', '/tmp/tcn', 'Directory where to write event logs.')
FLAGS = tf.app.flags.FLAGS
def main(_):
"""Runs main eval loop."""
# Parse config dict from yaml config files / command line flags.
logdir = FLAGS.logdir
config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params)
# Choose an estimator based on training strategy.
estimator = get_estimator(config, logdir)
# Wait for the first checkpoint file to be written.
while not tf.train.latest_checkpoint(logdir):
tf.logging.info('Waiting for a checkpoint file...')
time.sleep(10)
# Run validation.
while True:
estimator.evaluate()
if __name__ == '__main__':
tf.app.run()
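# --- Hedged invocation sketch (not part of the original script) ---
# Flag names are taken from the definitions above; the YAML path and the
# log directory are hypothetical:
#   python eval.py --config_paths=/path/to/config.yml --logdir=/tmp/tcn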
|
NemesisRE/ACE3
|
refs/heads/master
|
tools/github_privates_bot.py
|
22
|
#!/usr/bin/env python3
import os
import argparse
from pygithub3 import Github
def main():
gh = Github(user='acemod', repo='ACE3')
pull_requests = gh.pull_requests.list().all()
for request in pull_requests:
files = gh.pull_requests.list_files(request.number).all()
for file in files:
# print(file.filename)
if '.sqf' in file.filename:
print(file)
if __name__ == "__main__":
main()
|
SteadyQuad/android_kernel_yotaphone2
|
refs/heads/lollipop5.0
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found in any parent, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
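# Hedged example (not in the original script): a made-up line in the
# function-tracer format matched by the regular expression above,
#   bash-1234  [000]  6306.400566: do_sys_open <-sys_open
# for which parseLine() returns ('6306.400566', 'do_sys_open', 'sys_open').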
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|