repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
JordanReiter/django-pyodbc | sql_server/pyodbc/base.py | 1 | 14472 | """
MS SQL Server database backend for Django.
"""
try:
import pyodbc as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading pyodbc module: %s" % e)
import re
# Enforce a minimum pyodbc version. The version string looks like "2.1.8"
# or "2.0.38-beta23"; a missing beta suffix is treated as beta 9999 so a
# final release compares greater than any of its betas.
m = re.match(r'(\d+)\.(\d+)\.(\d+)(?:-beta(\d+))?', Database.version)
vlist = list(m.groups())
if vlist[3] is None: vlist[3] = '9999'
pyodbc_ver = tuple(map(int, vlist))
if pyodbc_ver < (2, 0, 38, 9999):
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("pyodbc 2.0.38 or newer is required; you have %s" % Database.version)
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.conf import settings
from django import VERSION as DjangoVersion
# Map the running Django release to an internal version code used for
# conditional behaviour below: 9 = pre-1.0, 10 = 1.0, 11 = 1.1,
# 12 = 1.2, 13 = 1.3 or later.
if DjangoVersion[:2] == (1,2) :
    from django import get_version
    version_str = get_version()
    if 'SVN' in version_str and int(version_str.split('SVN-')[-1]) < 11952: # django trunk revision 11952 Added multiple database support.
        _DJANGO_VERSION = 11
    else:
        _DJANGO_VERSION = 12
elif DjangoVersion[:2] == (1,1):
    _DJANGO_VERSION = 11
elif DjangoVersion[:2] == (1,0):
    _DJANGO_VERSION = 10
elif DjangoVersion[0] == 1:
    _DJANGO_VERSION = 13
else:
    _DJANGO_VERSION = 9
from sql_server.pyodbc.operations import DatabaseOperations
from sql_server.pyodbc.client import DatabaseClient
from sql_server.pyodbc.creation import DatabaseCreation
from sql_server.pyodbc.introspection import DatabaseIntrospection
import os
import warnings
warnings.filterwarnings('error', 'The DATABASE_ODBC.+ is deprecated', DeprecationWarning, __name__, 0)
# Collation applied to LIKE-based lookups. DATABASE_OPTIONS['collation'] is
# the preferred source; the legacy DATABASE_COLLATION setting is still
# honoured but triggers a deprecation warning.
collation = 'Latin1_General_CI_AS'
if hasattr(settings, 'DATABASE_COLLATION'):
    warnings.warn(
        "The DATABASE_COLLATION setting is going to be deprecated, use DATABASE_OPTIONS['collation'] instead.",
        DeprecationWarning
    )
    collation = settings.DATABASE_COLLATION
elif 'collation' in settings.DATABASE_OPTIONS:
    collation = settings.DATABASE_OPTIONS['collation']
# Old-style ODBC settings that moved into DATABASE_OPTIONS; warn on each
# one that is still present.
deprecated = (
    ('DATABASE_ODBC_DRIVER', 'driver'),
    ('DATABASE_ODBC_DSN', 'dsn'),
    ('DATABASE_ODBC_EXTRA_PARAMS', 'extra_params'),
)
for old, new in deprecated:
    if hasattr(settings, old):
        warnings.warn(
            "The %s setting is deprecated, use DATABASE_OPTIONS['%s'] instead." % (old, new),
            DeprecationWarning
        )
# Re-export the driver's DB-API 2.0 exception classes (PEP 249 convention).
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags for the SQL Server backend."""
    # The backend ships its own Query subclass (see operations module).
    uses_custom_query_class = True
    # Disabled by default; enabled at connect time for MARS-capable drivers.
    can_use_chunked_reads = False
    #uses_savepoints = True
class DatabaseWrapper(BaseDatabaseWrapper):
    """
    pyodbc-based database wrapper for MS SQL Server.

    Connection tweaks are read from the settings OPTIONS /
    DATABASE_OPTIONS dictionary: ``driver``, ``dsn``, ``MARS_Connection``,
    ``datefirst``, ``unicode_results``, ``autocommit``, ``extra_params``
    and ``host_is_server``.
    """
    # ODBC driver file name (upper-cased), detected on first connect.
    drv_name = None
    # True when the driver needs SQL/params encoded as UTF-8 byte strings
    # (FreeTDS-style drivers); determined on first connect.
    driver_needs_utf8 = None
    MARS_Connection = False
    unicode_results = False
    # SET DATEFIRST value; 7 makes Sunday the first day of the week, which
    # matches Django's 'week_day' lookup convention.
    datefirst = 7

    # Collations:       http://msdn2.microsoft.com/en-us/library/ms184391.aspx
    #                   http://msdn2.microsoft.com/en-us/library/ms179886.aspx
    # T-SQL LIKE:       http://msdn2.microsoft.com/en-us/library/ms179859.aspx
    # Full-Text search: http://msdn2.microsoft.com/en-us/library/ms142571.aspx
    #   CONTAINS:       http://msdn2.microsoft.com/en-us/library/ms187787.aspx
    #   FREETEXT:       http://msdn2.microsoft.com/en-us/library/ms176078.aspx
    operators = {
        # Since '=' is used not only for string comparision there is no way
        # to make it case (in)sensitive. It will simply fallback to the
        # database collation.
        'exact': '= %s',
        'iexact': "= UPPER(%s)",
        'contains': "LIKE %s ESCAPE '\\' COLLATE " + collation,
        'icontains': "LIKE UPPER(%s) ESCAPE '\\' COLLATE "+ collation,
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\' COLLATE " + collation,
        'endswith': "LIKE %s ESCAPE '\\' COLLATE " + collation,
        'istartswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + collation,
        'iendswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + collation,

        # TODO: remove, keep native T-SQL LIKE wildcards support
        # or use a "compatibility layer" and replace '*' with '%'
        # and '.' with '_'
        'regex': 'LIKE %s COLLATE ' + collation,
        'iregex': 'LIKE %s COLLATE ' + collation,

        # TODO: freetext, full-text contains...
    }

    def __init__(self, *args, **kwargs):
        """Initialize backend components appropriate to the Django version."""
        super(DatabaseWrapper, self).__init__(*args, **kwargs)

        if 'OPTIONS' in self.settings_dict:
            self.MARS_Connection = self.settings_dict['OPTIONS'].get('MARS_Connection', False)
            self.datefirst = self.settings_dict['OPTIONS'].get('datefirst', 7)
            self.unicode_results = self.settings_dict['OPTIONS'].get('unicode_results', False)

        if _DJANGO_VERSION >= 13:
            self.features = DatabaseFeatures(self)
        else:
            # Bugfix: a stray "raise Exception(...)" here made every setup on
            # Django < 1.3 crash and left this assignment unreachable.
            self.features = DatabaseFeatures()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        if _DJANGO_VERSION >= 12:
            self.ops = DatabaseOperations(self)
            self.validation = BaseDatabaseValidation(self)
        else:
            self.ops = DatabaseOperations()
            self.validation = BaseDatabaseValidation()
        self.connection = None

    def _cursor(self):
        """
        Return a CursorWrapper, connecting first if necessary.

        On the first connection the ODBC connection string is assembled from
        the settings, the session date options are set and driver-specific
        capabilities (UTF-8 handling, MARS chunked reads) are detected.
        """
        new_conn = False
        settings_dict = self.settings_dict
        db_str, user_str, passwd_str, port_str = None, None, None, None
        # Multi-db settings (Django >= 1.2) vs. legacy flat settings names.
        if _DJANGO_VERSION >= 12:
            options = settings_dict['OPTIONS']
            if settings_dict['NAME']:
                db_str = settings_dict['NAME']
            if settings_dict['HOST']:
                host_str = settings_dict['HOST']
            else:
                host_str = 'localhost'
            if settings_dict['USER']:
                user_str = settings_dict['USER']
            if settings_dict['PASSWORD']:
                passwd_str = settings_dict['PASSWORD']
            if settings_dict['PORT']:
                port_str = settings_dict['PORT']
        else:
            options = settings_dict['DATABASE_OPTIONS']
            if settings_dict['DATABASE_NAME']:
                db_str = settings_dict['DATABASE_NAME']
            if settings_dict['DATABASE_HOST']:
                host_str = settings_dict['DATABASE_HOST']
            else:
                host_str = 'localhost'
            if settings_dict['DATABASE_USER']:
                user_str = settings_dict['DATABASE_USER']
            if settings_dict['DATABASE_PASSWORD']:
                passwd_str = settings_dict['DATABASE_PASSWORD']
            if settings_dict['DATABASE_PORT']:
                port_str = settings_dict['DATABASE_PORT']
        if self.connection is None:
            new_conn = True
            if not db_str:
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured('You need to specify NAME in your Django settings file.')
            cstr_parts = []
            if 'driver' in options:
                driver = options['driver']
            else:
                if os.name == 'nt':
                    driver = 'SQL Server'
                else:
                    driver = 'FreeTDS'
            if 'dsn' in options:
                cstr_parts.append('DSN=%s' % options['dsn'])
            else:
                # Only append DRIVER if DATABASE_ODBC_DSN hasn't been set
                cstr_parts.append('DRIVER={%s}' % driver)
                # On Windows, or with FreeTDS when the host really is a server
                # name, use SERVER= (optionally with ",port"); otherwise refer
                # to a FreeTDS freetds.conf entry via SERVERNAME=.
                if os.name == 'nt' or driver == 'FreeTDS' and \
                        options.get('host_is_server', False):
                    if port_str:
                        host_str += ',%s' % port_str
                    cstr_parts.append('SERVER=%s' % host_str)
                else:
                    cstr_parts.append('SERVERNAME=%s' % host_str)
            if user_str:
                cstr_parts.append('UID=%s;PWD=%s' % (user_str, passwd_str))
            else:
                # No credentials: fall back to integrated authentication.
                if driver in ('SQL Server', 'SQL Native Client'):
                    cstr_parts.append('Trusted_Connection=yes')
                else:
                    cstr_parts.append('Integrated Security=SSPI')
            cstr_parts.append('DATABASE=%s' % db_str)
            if self.MARS_Connection:
                cstr_parts.append('MARS_Connection=yes')
            if 'extra_params' in options:
                cstr_parts.append(options['extra_params'])
            connstr = ';'.join(cstr_parts)
            autocommit = options.get('autocommit', False)
            if self.unicode_results:
                # NOTE: the truthy string 'True' is kept for backwards
                # compatibility with existing deployments.
                self.connection = Database.connect(connstr,
                                                   autocommit=autocommit,
                                                   unicode_results='True')
            else:
                self.connection = Database.connect(connstr,
                                                   autocommit=autocommit)
            connection_created.send(sender=self.__class__)
        cursor = self.connection.cursor()
        if new_conn:
            # Set date format for the connection. Also, make sure Sunday is
            # considered the first day of the week (to be consistent with the
            # Django convention for the 'week_day' Django lookup) if the user
            # hasn't told us otherwise
            cursor.execute("SET DATEFORMAT ymd; SET DATEFIRST %s" % self.datefirst)
            if self.ops._get_sql_server_ver(self.connection) < 2005:
                self.creation.data_types['TextField'] = 'ntext'
            if self.driver_needs_utf8 is None:
                self.driver_needs_utf8 = True
                self.drv_name = self.connection.getinfo(Database.SQL_DRIVER_NAME).upper()
                # Native Windows drivers handle Unicode themselves.
                if self.drv_name in ('SQLSRV32.DLL', 'SQLNCLI.DLL', 'SQLNCLI10.DLL'):
                    self.driver_needs_utf8 = False
                # http://msdn.microsoft.com/en-us/library/ms131686.aspx
                if self.ops._get_sql_server_ver(self.connection) >= 2005 and self.drv_name in ('SQLNCLI.DLL', 'SQLNCLI10.DLL') and self.MARS_Connection:
                    # How to to activate it: Add 'MARS_Connection': True
                    # to the DATABASE_OPTIONS dictionary setting
                    self.features.can_use_chunked_reads = True
            # FreeTDS can't execute some sql queries like CREATE DATABASE etc.
            # in multi-statement, so we need to commit the above SQL sentence(s)
            # to avoid this
            if self.drv_name.startswith('LIBTDSODBC') and not self.connection.autocommit:
                self.connection.commit()
        return CursorWrapper(cursor, self.driver_needs_utf8)
class CursorWrapper(object):
    """
    A wrapper around the pyodbc's cursor that takes in account a) some pyodbc
    DB-API 2.0 implementation and b) some common ODBC driver particularities.
    """
    def __init__(self, cursor, driver_needs_utf8):
        # The underlying pyodbc cursor being wrapped.
        self.cursor = cursor
        # When True, SQL and parameters are sent as UTF-8 byte strings and
        # results are decoded back (needed for FreeTDS-style drivers).
        self.driver_needs_utf8 = driver_needs_utf8
        # Last statement/params seen by execute(), kept for debugging.
        self.last_sql = ''
        self.last_params = ()
    def format_sql(self, sql, n_params=None):
        """Encode the SQL if needed and convert '%s' placeholders to '?'."""
        if self.driver_needs_utf8 and isinstance(sql, unicode):
            # FreeTDS (and other ODBC drivers?) doesn't support Unicode
            # yet, so we need to encode the SQL clause itself in utf-8
            sql = sql.encode('utf-8')
        # pyodbc uses '?' instead of '%s' as parameter placeholder.
        if n_params is not None:
            # Substitute exactly n_params placeholders positionally.
            sql = sql % tuple('?' * n_params)
        else:
            if '%s' in sql:
                sql = sql.replace('%s', '?')
        return sql
    def format_params(self, params):
        """Return params as a tuple, encoding text and mapping bools to 0/1."""
        fp = []
        for p in params:
            if isinstance(p, unicode):
                if self.driver_needs_utf8:
                    # FreeTDS (and other ODBC drivers?) doesn't support Unicode
                    # yet, so we need to encode parameters in utf-8
                    fp.append(p.encode('utf-8'))
                else:
                    fp.append(p)
            elif isinstance(p, str):
                if self.driver_needs_utf8:
                    # TODO: use system encoding when calling decode()?
                    # NOTE(review): decode then re-encode only validates that
                    # the bytes are well-formed UTF-8 -- confirm intent.
                    fp.append(p.decode('utf-8').encode('utf-8'))
                else:
                    fp.append(p)
            elif isinstance(p, type(True)):
                # SQL Server has no boolean literal; send BIT-compatible ints.
                if p:
                    fp.append(1)
                else:
                    fp.append(0)
            else:
                fp.append(p)
        return tuple(fp)
    def execute(self, sql, params=()):
        """Execute a single statement, recording it for later inspection."""
        self.last_sql = sql
        sql = self.format_sql(sql, len(params))
        params = self.format_params(params)
        self.last_params = params
        return self.cursor.execute(sql, params)
    def executemany(self, sql, params_list):
        """Execute the statement once per parameter tuple in params_list."""
        sql = self.format_sql(sql)
        # pyodbc's cursor.executemany() doesn't support an empty param_list
        if not params_list:
            if '?' in sql:
                return
        else:
            raw_pll = params_list
            params_list = [self.format_params(p) for p in raw_pll]
        return self.cursor.executemany(sql, params_list)
    def format_results(self, rows):
        """
        Decode data coming from the database if needed and convert rows to tuples
        (pyodbc Rows are not sliceable).
        """
        if not self.driver_needs_utf8:
            return tuple(rows)
        # FreeTDS (and other ODBC drivers?) doesn't support Unicode
        # yet, so we need to decode utf-8 data coming from the DB
        fr = []
        for row in rows:
            if isinstance(row, str):
                fr.append(row.decode('utf-8'))
            else:
                fr.append(row)
        return tuple(fr)
    def fetchone(self):
        # NOTE(review): returns [] (not None) when exhausted, diverging from
        # DB-API 2.0 -- callers apparently rely on it; confirm before changing.
        row = self.cursor.fetchone()
        if row is not None:
            return self.format_results(row)
        return []
    def fetchmany(self, chunk):
        return [self.format_results(row) for row in self.cursor.fetchmany(chunk)]
    def fetchall(self):
        return [self.format_results(row) for row in self.cursor.fetchall()]
    def __getattr__(self, attr):
        # NOTE(review): __getattr__ only runs after normal lookup fails, so
        # the __dict__ check below is effectively dead code.
        if attr in self.__dict__:
            return self.__dict__[attr]
        return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor) | bsd-3-clause |
timlegrand/giterm | src/giterm/cursutils.py | 1 | 1232 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import curses
import pdb
import sys
import time
screen = None
def init(stdscr):
    """Register the top-level curses window for later use by this module."""
    global screen
    screen = stdscr
def finalize(stdscr=None):
    """Restore the terminal to its normal state after curses use.

    Falls back to the window registered via init() when *stdscr* is omitted.
    """
    if not stdscr and not screen:
        raise Exception('either call init() first or provide a window object')
    # Prefer the explicit argument; otherwise use the registered screen.
    stdscr = screen if screen and not stdscr else stdscr
    curses.nocbreak()
    stdscr.keypad(0)
    curses.echo()
    curses.endwin()
def debug(stdscr=None):
    """Drop into pdb from inside a curses application.

    Restores the terminal first (via finalize) so the debugger prompt is
    usable, then starts an interactive session in the caller's frame.
    """
    if not stdscr and not screen:
        raise Exception('either call init() first or provide a window object')
    stdscr = screen if screen and not stdscr else stdscr
    finalize(stdscr)
    debugger = pdb.Pdb()
    debugger.reset()
    debugger.do_where(None)
    users_frame = sys._getframe().f_back  # One frame up, outside this function
    debugger.interaction(users_frame, None)
def log(msg):
    """Append *msg* to the giterm log file, prefixed with a timestamp."""
    timestamp = '{:<18}'.format(str(time.time()))
    line = timestamp + ': ' + str(msg)
    # Guarantee exactly one trailing newline per entry.
    if line[-1] != '\n':
        line = line + '\n'
    with open('../giterm.log', 'a') as logfile:
        logfile.write(line)
# Use with:
# import cursutils
# cursutils.init(stdscr) # where stdscr is a `curses` Window object
# cursutils.debug()
| bsd-2-clause |
CDE-UNIBE/qcat | apps/qcat/management/commands/memory_profile.py | 1 | 5420 | import subprocess
from pathlib import Path
from django.core.management.base import BaseCommand
from django.db.models import Avg, Sum, Max
from django.utils.dateparse import parse_datetime
from django.utils.timezone import make_aware
from tabulate import tabulate
from qcat.models import MemoryLog
class Command(BaseCommand):
    """Management command: import memory-profile logs and print metrics."""
    help = 'Read log files and show some metrics.'
    # Delimiter in the log files
    delimiter = ';'
    # Glob pattern for log file names
    cache_file_name = 'caches.log*'
    # Number of results to display
    slice_size = 10
    # temporary file path, to store downloaded logs
    tmp = '/tmp/qcat-logs'

    def add_arguments(self, parser):
        """Register the command-line options."""
        parser.add_argument(
            '--no-truncate',
            dest='no-truncate',
            action='store_true',
            default=False,
            help='Do not truncate db if a path for log files is given!'
        )
        parser.add_argument(
            '--download-logs',
            dest='download',
            default='',
            help='SSH path (user@server:/path/to/logs/) to fetch logs from. They are stored in the '
                 'folder as defined in self.tmp (/tmp/qcat-logs).'
        )
        parser.add_argument(
            '--path',
            dest='path',
            default='',
            help='Path to folder containing log files'
        )

    def handle(self, *args, **options):
        """Optionally download logs, import them into the DB, print stats."""
        if options['download']:
            self.download_logs(ssh_cmd=options['download'])
            # Import from the directory the logs were downloaded to.
            options['path'] = self.tmp
        if options['path']:
            if not options['no-truncate']:
                self.truncate_logs_in_db()
            self.save_logs_to_db(path=options['path'])
        self.display_stats()

    def download_logs(self, ssh_cmd):
        """Rsync the remote log folder into the local temporary path."""
        # NOTE: shell=True with an interpolated, operator-supplied argument --
        # only run this with trusted ssh paths.
        subprocess.call(
            args=f'rsync -avz --delete -e "ssh" {ssh_cmd} {self.tmp}',
            shell=True
        )

    def save_logs_to_db(self, path):
        """
        Read log files and save them to the DB for easy AVG, SUM and stuff.
        """
        log_files = Path(path).glob(self.cache_file_name)
        for log in log_files:
            with log.open() as f:
                print(f'Importing {log.name}')
                self.parse_lines(*f.readlines())

    @property
    def titles(self):
        """
        Model field names without the ID field.
        """
        return [field.name for field in MemoryLog._meta.get_fields()[1:]]

    def parse_lines(self, *lines):
        """
        Split given lines according to delimiter, and prepare model row generation.
        """
        # Hoist the property lookup; it hits model _meta for every call.
        field_names = self.titles
        memory_logs = []
        for line in lines:
            attrs = {}
            for index, param in enumerate(line.split(self.delimiter)):
                # Read datetime from string. Not the nicest approach, but the log is always 'info',
                # so it starts at position 5.
                if index == 0:  # bugfix: was "index is 0" (identity check on an int literal)
                    param = make_aware(parse_datetime(param[5:21]))
                attrs[field_names[index]] = param
            memory_logs.append(MemoryLog(**attrs))
        MemoryLog.objects.bulk_create(memory_logs)

    @staticmethod
    def truncate_logs_in_db():
        """Delete all previously imported log rows."""
        MemoryLog.objects.all().delete()

    def display_stats(self):
        """
        Show:
        - largest absolute increments
        - largest avg increments
        - largest sum of increments
        """
        self.display_largest_increments()
        self.display_largest_distinct_increments()
        self.display_average_increments()
        self.display_sum_increments()

    def display_largest_increments(self):
        """Print the single largest increments."""
        qs = MemoryLog.objects.values(
            'params', 'increment'
        ).order_by(
            '-increment'
        )
        self.print_rows(
            title='Largest absolute (single) increments',
            queryset=qs
        )

    def display_largest_distinct_increments(self):
        """Print the largest increment per distinct params value."""
        qs = MemoryLog.objects.values(
            'params'
        ).annotate(
            Max('increment')
        ).order_by(
            '-increment'
        )
        self.print_rows(
            title='Largest absolute (single) distinct increments',
            queryset=qs
        )

    def display_average_increments(self):
        """Print the params with the highest average increment."""
        qs = MemoryLog.objects.values(
            'params'
        ).annotate(
            Avg('increment')
        ).order_by(
            '-increment__avg'
        )
        self.print_rows(
            title='Highest average increments',
            queryset=qs
        )

    def display_sum_increments(self):
        """Print the params with the highest summed increment."""
        qs = MemoryLog.objects.values(
            'params'
        ).annotate(
            Sum('increment')
        ).order_by(
            '-increment__sum'
        )
        self.print_rows(
            title='Highest sum of increments',
            queryset=qs
        )

    def print_rows(self, title, queryset):
        """Tabulate the first ``slice_size`` rows of *queryset*."""
        print('\n')
        print(title.upper())
        rows = []
        for item in queryset[0:self.slice_size]:
            # Use 'values', as dict keys may vary (increment, increment__sum, ...)
            values = list(item.values())
            # Meh - cast increment to size in MB.
            values[1] = int(values[1]) >> 20
            rows.append(values)
        print(tabulate(
            tabular_data=rows,
            headers=['Params', 'Increment (MB)'],
            tablefmt='grid')
        )
        print('\n')
| apache-2.0 |
mitodl/micromasters | micromasters/envs_test.py | 1 | 3916 | """Tests for environment variable parsing functions"""
from unittest.mock import patch
import pytest
from micromasters.envs import (
EnvironmentVariableParseException,
get_any,
get_bool,
get_int,
get_list_of_str,
get_string,
)
FAKE_ENVIRONS = {
'true': 'True',
'false': 'False',
'positive': '123',
'negative': '-456',
'zero': '0',
'float': '1.1',
'expression': '123-456',
'none': 'None',
'string': 'a b c d e f g',
'list_of_int': '[3,4,5]',
'list_of_str': '["x", "y", \'z\']',
}
def test_get_any():
    """
    get_any should parse an environment variable into a bool, int, or a string
    """
    # Same keys as FAKE_ENVIRONS; only boolean- and int-like values change type.
    expected = {
        'true': True,
        'false': False,
        'positive': 123,
        'negative': -456,
        'zero': 0,
        'float': '1.1',
        'expression': '123-456',
        'none': 'None',
        'string': 'a b c d e f g',
        'list_of_int': '[3,4,5]',
        'list_of_str': '["x", "y", \'z\']',
    }
    with patch('micromasters.envs.os', environ=FAKE_ENVIRONS):
        for key, value in expected.items():
            assert get_any(key, 'default') == value
        # Missing variables fall back to the provided default.
        assert get_any('missing', 'default') == 'default'
def test_get_string():
    """
    get_string should get the string from the environment variable
    """
    with patch('micromasters.envs.os', environ=FAKE_ENVIRONS):
        for key, value in FAKE_ENVIRONS.items():
            assert get_string(key, 'default') == value
        # Missing variables fall back to the provided default.
        # (A duplicated copy of this assertion was removed.)
        assert get_string('missing', 'default') == 'default'
def test_get_int():
    """
    get_int should get the int from the environment variable, or raise an exception if it's not parseable as an int
    """
    with patch('micromasters.envs.os', environ=FAKE_ENVIRONS):
        assert get_int('positive', 1234) == 123
        assert get_int('negative', 1234) == -456
        assert get_int('zero', 1234) == 0

        # Every non-integer value must raise with a descriptive message.
        for key, value in FAKE_ENVIRONS.items():
            if key not in ('positive', 'negative', 'zero'):
                with pytest.raises(EnvironmentVariableParseException) as ex:
                    get_int(key, 1234)
                assert ex.value.args[0] == 'Expected value in {key}={value} to be an int'.format(
                    key=key,
                    value=value,
                )

        # Missing variables fall back to the provided default.
        assert get_int('missing', 'default') == 'default'
def test_get_bool():
    """
    get_bool should get the bool from the environment variable, or raise an exception if it's not parseable as a bool
    """
    with patch('micromasters.envs.os', environ=FAKE_ENVIRONS):
        assert get_bool('true', 1234) is True
        assert get_bool('false', 1234) is False

        # Every non-boolean value must raise with a descriptive message.
        for key, value in FAKE_ENVIRONS.items():
            if key not in ('true', 'false'):
                with pytest.raises(EnvironmentVariableParseException) as ex:
                    get_bool(key, 1234)
                assert ex.value.args[0] == 'Expected value in {key}={value} to be a boolean'.format(
                    key=key,
                    value=value,
                )

        # Bugfix: this asserted get_int() (copy-paste from test_get_int);
        # the function under test here is get_bool().
        assert get_bool('missing', 'default') == 'default'
def test_get_list_of_str():
    """
    get_list_of_str should parse a list of strings
    """
    with patch('micromasters.envs.os', environ=FAKE_ENVIRONS):
        assert get_list_of_str('list_of_str', ['noth', 'ing']) == ['x', 'y', 'z']

        # Every value that is not a list of strings must raise.
        for key, value in FAKE_ENVIRONS.items():
            if key != 'list_of_str':
                with pytest.raises(EnvironmentVariableParseException) as ex:
                    get_list_of_str(key, ['noth', 'ing'])
                assert ex.value.args[0] == 'Expected value in {key}={value} to be a list of str'.format(
                    key=key,
                    value=value,
                )

        # Missing variables fall back to the provided default.
        assert get_list_of_str('missing', 'default') == 'default'
| bsd-3-clause |
mSenyor/sl4a | python/src/Lib/xml/etree/__init__.py | 183 | 1604 | # $Id: __init__.py 1821 2004-06-03 16:57:49Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
| apache-2.0 |
40223119/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/tokenize.py | 728 | 24424 | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
"NL", "untokenize", "ENCODING", "TokenInfo"]
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
'(': LPAR,
')': RPAR,
'[': LSQB,
']': RSQB,
':': COLON,
',': COMMA,
';': SEMI,
'+': PLUS,
'-': MINUS,
'*': STAR,
'/': SLASH,
'|': VBAR,
'&': AMPER,
'<': LESS,
'>': GREATER,
'=': EQUAL,
'.': DOT,
'%': PERCENT,
'{': LBRACE,
'}': RBRACE,
'==': EQEQUAL,
'!=': NOTEQUAL,
'<=': LESSEQUAL,
'>=': GREATEREQUAL,
'~': TILDE,
'^': CIRCUMFLEX,
'<<': LEFTSHIFT,
'>>': RIGHTSHIFT,
'**': DOUBLESTAR,
'+=': PLUSEQUAL,
'-=': MINEQUAL,
'*=': STAREQUAL,
'/=': SLASHEQUAL,
'%=': PERCENTEQUAL,
'&=': AMPEREQUAL,
'|=': VBAREQUAL,
'^=': CIRCUMFLEXEQUAL,
'<<=': LEFTSHIFTEQUAL,
'>>=': RIGHTSHIFTEQUAL,
'**=': DOUBLESTAREQUAL,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'@': AT
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """A single token: (type, string, start, end, line) with helpers."""

    def __repr__(self):
        # Show the numeric type together with its symbolic name, e.g. "54 (OP)".
        described_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'
                % self._replace(type=described_type))

    @property
    def exact_type(self):
        """The specific operator token type for OP tokens, else ``type``."""
        if self.type != OP:
            return self.type
        return EXACT_TOKEN_TYPES.get(self.string, self.type)
def group(*choices):
    """Return a regex alternation group matching any one of *choices*."""
    return '({})'.format('|'.join(choices))

def any(*choices):
    """Return a regex matching zero or more repetitions of *choices*."""
    # NOTE: intentionally shadows the builtin ``any`` (historical tokenize API).
    return group(*choices) + '*'

def maybe(*choices):
    """Return a regex matching zero or one occurrence of *choices*."""
    return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
endpats = {"'": Single, '"': Double,
"'''": Single3, '"""': Double3,
"r'''": Single3, 'r"""': Double3,
"b'''": Single3, 'b"""': Double3,
"R'''": Single3, 'R"""': Double3,
"B'''": Single3, 'B"""': Double3,
"br'''": Single3, 'br"""': Double3,
"bR'''": Single3, 'bR"""': Double3,
"Br'''": Single3, 'Br"""': Double3,
"BR'''": Single3, 'BR"""': Double3,
"rb'''": Single3, 'rb"""': Double3,
"Rb'''": Single3, 'Rb"""': Double3,
"rB'''": Single3, 'rB"""': Double3,
"RB'''": Single3, 'RB"""': Double3,
"u'''": Single3, 'u"""': Double3,
"R'''": Single3, 'R"""': Double3,
"U'''": Single3, 'U"""': Double3,
'r': None, 'R': None, 'b': None, 'B': None,
'u': None, 'U': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',
"rb'''", 'rb"""', "rB'''", 'rB"""',
"Rb'''", 'Rb"""', "RB'''", 'RB"""',
"u'''", 'u"""', "U'''", 'U"""',
):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ,
"rb'", 'rb"', "rB'", 'rB"',
"Rb'", 'Rb"', "RB'", 'RB"' ,
"u'", 'u"', "U'", 'U"',
):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass  # raised on malformed/incomplete input (e.g. EOF in a string)
class StopTokenizing(Exception): pass  # internal signal used to abort tokenization
class Untokenizer:
    """Rebuilds source text from a token stream (used by untokenize())."""

    def __init__(self):
        self.tokens = []        # accumulated source fragments
        self.prev_row = 1       # row where the previous token ended
        self.prev_col = 0       # column where the previous token ended
        self.encoding = None    # remembered from an ENCODING token, if any

    def add_whitespace(self, start):
        """Append spaces padding from the previous token's end to *start*."""
        row, col = start
        # NOTE(review): rows normally only increase, so "row <= self.prev_row"
        # looks inverted; it matches the CPython source of this era -- confirm
        # before changing.
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        """Render 5-tuple tokens exactly; fall back to compat() for 2-tuples."""
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Best-effort rendering for (type, string) 2-tuples (no positions)."""
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        # NAME/NUMBER need a trailing space so adjacent tokens don't merge.
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            # Track the indentation stack so each new line is re-indented.
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.
    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.
    Round-trip invariant for full input:
        Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output bytes will tokenize the back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    # Encode only when the stream carried an ENCODING token; otherwise the
    # result stays a str.
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    # The filename (if available) is used only to improve error messages.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        # Treat StopIteration from the readline callable as EOF.
        try:
            return readline()
        except StopIteration:
            return b''
    def find_cookie(line):
        """Return the normalized cookie encoding of *line*, or None."""
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)
        if bom_found:
            # A BOM and a non-utf-8 cookie contradict each other.
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding
    # Inspect at most the first two lines, per PEP 263.
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().

    Returns a TextIOWrapper over the underlying binary buffer.
    """
    buffer = builtins.open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        # Bug fix: previously the binary buffer leaked if detect_encoding()
        # (e.g. SyntaxError on a bad cookie) or TextIOWrapper raised.
        buffer.close()
        raise
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # Imported lazily to avoid problems when the itertools module is not
    # built yet and tokenize is imported (interpreter bootstrap).
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    remaining = iter(readline, b"")   # lines not consumed during detection
    padding = repeat(b"")             # endless b"" tail after EOF
    all_lines = chain(consumed, remaining, padding)
    return _tokenize(all_lines.__next__, encoding)
def _tokenize(readline, encoding):
    """Implementation backing tokenize()/generate_tokens().

    Yields TokenInfo 5-tuples read from *readline*.  If *encoding* is not
    None, each raw line is decoded with it and an initial ENCODING token
    is emitted first.
    """
    lnum = parenlev = continued = 0     # line number, bracket depth, '\' flag
    numchars = '0123456789'
    contstr, needcont = '', 0           # state for a string spanning lines
    contline = None
    indents = [0]                       # stack of indentation column widths

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                         # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                     # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string not closed and not continued with '\'.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line:
                break
            column = 0
            while pos < max:            # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    # Form feed resets the column count.
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':    # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:    # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                           # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:             # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or          # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Inside brackets a newline is non-logical (NL).
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                                    token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                    # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)    # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':           # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                           # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():        # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':               # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:          # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize a source whose *readline* returns str instead of bytes.

    Like tokenize(), but no encoding detection is performed and no
    ENCODING token is emitted (encoding is None).
    """
    return _tokenize(readline, None)
def main():
    """Command-line entry point: tokenize a file (or stdin) and print tokens."""
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        # Print a compiler-style "file:line:col: error: msg" and exit(1).
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # stdin delivers str lines, so use the string-based tokenizer.
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except IOError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise
# Allow execution as a script (``python -m tokenize``).
if __name__ == "__main__":
    main()
| agpl-3.0 |
escaped/django-video-encoding | video_encoding/files.py | 1 | 1231 | from django.core.files import File
from .backends import get_backend
from .utils import get_local_path
class VideoFile(File):
    """
    A mixin for use alongside django.core.files.base.File, which provides
    additional features for dealing with videos.
    """
    def _get_width(self):
        """
        Returns video width in pixels.
        """
        return self._get_video_info().get('width', 0)
    width = property(_get_width)

    def _get_height(self):
        """
        Returns video height in pixels.
        """
        return self._get_video_info().get('height', 0)
    height = property(_get_height)

    def _get_duration(self):
        """
        Returns duration in seconds.
        """
        return self._get_video_info().get('duration', 0)
    duration = property(_get_duration)

    def _get_video_info(self):
        """
        Returns basic information about the video as dictionary.
        """
        # Probe the file only once per instance; later accesses reuse the
        # dict cached in _info_cache.
        if not hasattr(self, '_info_cache'):
            encoding_backend = get_backend()
            with get_local_path(self) as local_path:
                info_cache = encoding_backend.get_media_info(local_path)
                self._info_cache = info_cache
        return self._info_cache
| bsd-3-clause |
Fireblend/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """Contour of the L1 penalty: |w1| = 1 - |w0| for each w0 in *xs*."""
    values = np.asarray(xs, dtype=float)
    # Same elementwise operations as the scalar form, vectorized.
    return np.sqrt((1 - np.sqrt(values ** 2.0)) ** 2.0)
def l2(xs):
    """Contour of the L2 penalty: the unit circle, w1 = sqrt(1 - w0**2)."""
    values = np.asarray(xs, dtype=float)
    # Same elementwise operations as the scalar form, vectorized.
    return np.sqrt(1.0 - values ** 2.0)
def el(xs, z):
    # Closed-form contour of the elastic-net penalty for mixing value z.
    # Presumably solves |w0| + z*w0^2 + |w1| + z*w1^2 = const for w1 as a
    # function of w0 = x -- TODO confirm the derivation.
    # NOTE(review): the (2 - 4*z) denominator is zero at z = 0.5, which is
    # why the caller uses alpha = 0.501.
    return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
                      (4 * z ** 2
                       - 8 * x * z ** 2
                       + 8 * x ** 2 * z ** 2
                       - 16 * x ** 2 * z ** 3
                       + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
                      - 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
    """Draw the coordinate axes as a black cross spanning [-ext, ext]."""
    span = [-ext, ext]
    origin = [0, 0]
    plt.plot(span, origin, "k-")
    plt.plot(origin, span, "k-")
# Sample w0 over [0, 1]; each curve is mirrored into the other quadrants.
xs = np.linspace(0, 1, 100)
alpha = 0.501  # NOTE: exactly 0.5 would make el() divide by zero (2 - 4*z == 0)

cross(1.2)

plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")

plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")

plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")

plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()

plt.axis("equal")
plt.show()
| bsd-3-clause |
GinnyN/towerofdimensions-django | django/contrib/gis/db/models/sql/query.py | 379 | 5314 | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
# Every lookup term accepted by GeoQuery: the spatial lookups (mapped to
# None placeholders, membership is what matters) plus Django's standard
# query terms.  dict.fromkeys replaces the manual dict([(x, None) ...]).
ALL_TERMS = dict.fromkeys((
    'bbcontains', 'bboverlaps', 'contained', 'contains',
    'contains_properly', 'coveredby', 'covers', 'crosses', 'disjoint',
    'distance_gt', 'distance_gte', 'distance_lt', 'distance_lte',
    'dwithin', 'equals', 'exact',
    'intersects', 'overlaps', 'relate', 'same_as', 'touches', 'within',
    'left', 'right', 'overlaps_left', 'overlaps_right',
    'overlaps_above', 'overlaps_below',
    'strictly_above', 'strictly_below',
))
ALL_TERMS.update(sql.constants.QUERY_TERMS)
class GeoQuery(sql.Query):
    """
    A single spatial SQL query.
    """
    # Overridding the valid query terms.
    query_terms = ALL_TERMS
    aggregates_module = gis_aggregates

    compiler = 'GeoSQLCompiler'

    #### Methods overridden from the base Query class ####
    def __init__(self, model, where=GeoWhereNode):
        super(GeoQuery, self).__init__(model, where)
        # The following attributes are customized for the GeoQuerySet.
        # The GeoWhereNode and SpatialBackend classes contain backend-specific
        # routines and functions.
        self.custom_select = {}
        self.transformed_srid = None
        self.extra_select_fields = {}

    def clone(self, *args, **kwargs):
        obj = super(GeoQuery, self).clone(*args, **kwargs)
        # Customized selection dictionary and transformed srid flag have
        # to also be added to obj.
        obj.custom_select = self.custom_select.copy()
        obj.transformed_srid = self.transformed_srid
        obj.extra_select_fields = self.extra_select_fields.copy()
        return obj

    def convert_values(self, value, field, connection):
        """
        Using the same routines that Oracle does we can convert our
        extra selection objects into Geometry and Distance objects.
        TODO: Make converted objects 'lazy' for less overhead.
        """
        if connection.ops.oracle:
            # Running through Oracle's first.
            value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)

        if value is None:
            # Output from spatial function is NULL (e.g., called
            # function on a geometry field with NULL value).
            pass
        elif isinstance(field, DistanceField):
            # Using the field's distance attribute, can instantiate
            # `Distance` with the right context.
            value = Distance(**{field.distance_att : value})
        elif isinstance(field, AreaField):
            value = Area(**{field.area_att : value})
        elif isinstance(field, (GeomField, GeometryField)) and value:
            value = Geometry(value)
        return value

    def get_aggregation(self, using):
        # Remove any aggregates marked for reduction from the subquery
        # and move them to the outer AggregateQuery.
        connection = connections[using]
        for alias, aggregate in self.aggregate_select.items():
            if isinstance(aggregate, gis_aggregates.GeoAggregate):
                if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
                    self.extra_select_fields[alias] = GeomField()
        return super(GeoQuery, self).get_aggregation(using)

    def resolve_aggregate(self, value, aggregate, connection):
        """
        Overridden from GeoQuery's normalize to handle the conversion of
        GeoAggregate objects.
        """
        if isinstance(aggregate, self.aggregates_module.GeoAggregate):
            if aggregate.is_extent:
                if aggregate.is_extent == '3D':
                    return connection.ops.convert_extent3d(value)
                else:
                    return connection.ops.convert_extent(value)
            else:
                return connection.ops.convert_geom(value, aggregate.source)
        else:
            return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)

    # Private API utilities, subject to change.
    def _geo_field(self, field_name=None):
        """
        Returns the first Geometry field encountered; or specified via the
        `field_name` keyword. The `field_name` may be a string specifying
        the geometry field on this GeoQuery's model, or a lookup string
        to a geometry field via a ForeignKey relation.
        """
        if field_name is None:
            # Incrementing until the first geographic field is found.
            for fld in self.model._meta.fields:
                if isinstance(fld, GeometryField): return fld
            return False
        else:
            # Otherwise, check by the given field name -- which may be
            # a lookup to a _related_ geographic field.
            return GeoWhereNode._check_geo_field(self.model._meta, field_name)
| bsd-3-clause |
BeegorMif/HTPC-Manager | tornado/test/locale_test.py | 57 | 2170 | from __future__ import absolute_import, division, print_function, with_statement
import datetime
import os
import tornado.locale
from tornado.escape import utf8
from tornado.test.util import unittest
from tornado.util import u, unicode_type
class TranslationLoaderTest(unittest.TestCase):
    """Tests CSV and gettext translation loading against fixture
    directories, isolating tornado.locale module-level state per test."""
    # TODO: less hacky way to get isolated tests
    SAVE_VARS = ['_translations', '_supported_locales', '_use_gettext']

    def clear_locale_cache(self):
        # Locale caches instances per locale code; drop the cache so each
        # test observes freshly loaded translations.
        if hasattr(tornado.locale.Locale, '_cache'):
            del tornado.locale.Locale._cache

    def setUp(self):
        # Snapshot the module-level state so tests can mutate it freely.
        self.saved = {}
        for var in TranslationLoaderTest.SAVE_VARS:
            self.saved[var] = getattr(tornado.locale, var)
        self.clear_locale_cache()

    def tearDown(self):
        # Restore the snapshot taken in setUp().
        for k, v in self.saved.items():
            setattr(tornado.locale, k, v)
        self.clear_locale_cache()

    def test_csv(self):
        tornado.locale.load_translations(
            os.path.join(os.path.dirname(__file__), 'csv_translations'))
        locale = tornado.locale.get("fr_FR")
        self.assertTrue(isinstance(locale, tornado.locale.CSVLocale))
        self.assertEqual(locale.translate("school"), u("\u00e9cole"))

    def test_gettext(self):
        tornado.locale.load_gettext_translations(
            os.path.join(os.path.dirname(__file__), 'gettext_translations'),
            "tornado_test")
        locale = tornado.locale.get("fr_FR")
        self.assertTrue(isinstance(locale, tornado.locale.GettextLocale))
        self.assertEqual(locale.translate("school"), u("\u00e9cole"))
class LocaleDataTest(unittest.TestCase):
    def test_non_ascii_name(self):
        # The es_LA display name contains a non-ASCII character; check it
        # is stored as unicode and encodes to the expected utf-8 bytes.
        name = tornado.locale.LOCALE_NAMES['es_LA']['name']
        self.assertTrue(isinstance(name, unicode_type))
        self.assertEqual(name, u('Espa\u00f1ol'))
        self.assertEqual(utf8(name), b'Espa\xc3\xb1ol')
class EnglishTest(unittest.TestCase):
    def test_format_date(self):
        # full_format should spell out the date in en_US conventions.
        locale = tornado.locale.get('en_US')
        date = datetime.datetime(2013, 4, 28, 18, 35)
        self.assertEqual(locale.format_date(date, full_format=True),
                         'April 28, 2013 at 6:35 pm')
| gpl-3.0 |
Palasekmar/kaira | gui/datatypes.py | 8 | 9518 | #
# Copyright (C) 2013, 2014 Martin Surkovsky
# Copyright (C) 2013 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import csv
import gtk
import gtkutils
import settingswindow
import runview
import numpy as np
from tracelog import TraceLog
from table import Table
"""Supported types for extensions."""
types_repository = []
class Type(object):
    """This class serves as a basic class for all data types."""

    def __init__(self, name, short_name=None):
        """Initialize of type of types.

        Arguments:
        name -- a full name of a data type

        Keywords:
        short_name -- shorter version of a name (default: None)
        """
        self.name = name
        # Fall back to the full name when no abbreviation is given.
        if short_name is None:
            self.short_name = name
        else:
            self.short_name = short_name

        """a dictionary with registered loaders for a specific file extension
        (file extension: load function)
        """
        self.loaders = {}

        """a dictionary with registered savers for a specific file extension
        (file extension: save function)
        """
        self.savers = {}
        self.default_saver = None

    def get_view(self, data, app):
        """Return a widget with visualized data or None if the visualization
        is not implemented.

        Arguments:
        data -- data for visualization
        app -- a reference to the main application
        """
        return None

    def register_load_function(self, suffix, function):
        """Register a loading function to a file suffix.

        Arguments:
        suffix -- a suffix of a filename (it specifies a type of data)
        function -- function which can load a files with given suffix
        """
        self.loaders[suffix] = function

    def register_store_function(self, suffix, function, default=False):
        """Register a saving function to a file suffix.

        Arguments:
        suffix -- a suffix of a filename (it specifies a type of data)
        function -- function which can store data to file with given suffix
        default -- specify whether the given function is default saver or not
        """
        self.savers[suffix] = function
        # The first registered saver becomes the default automatically.
        if default or self.default_saver is None:
            self.default_saver = suffix
# *****************************************************************************
# module functions
def get_type_by_suffix(suffix):
    """Return the registered Type that can load files with *suffix*.

    Returns None when no registered type has a loader for the suffix.
    """
    # 'data_type' instead of 'type' to avoid shadowing the builtin.
    for data_type in types_repository:
        if suffix in data_type.loaders:
            return data_type
    return None
def get_loader_by_suffix(suffix):
    """Return a load function registered for *suffix*, or None."""
    for registered_type in types_repository:
        load_fn = registered_type.loaders.get(suffix)
        if load_fn is not None:
            return load_fn
    return None
def get_saver_by_suffix(suffix):
    """Return a store function registered for *suffix*, or None."""
    candidates = (t.savers.get(suffix) for t in types_repository)
    return next((fn for fn in candidates if fn is not None), None)
def get_load_file_filters():
    """Build gtk file filters for the Open dialog.

    The first filter matches every supported pattern; it is followed by
    one filter per registered type.
    """
    all_supported_types = gtk.FileFilter()
    all_supported_types.set_name("All supported files")
    result = [ all_supported_types ]
    for type in types_repository:
        patterns = [ "*." + s for s in type.loaders.keys() ]
        filter = gtk.FileFilter()
        filter.set_name("{0} ({1})".format(type.short_name, ", ".join(patterns)))
        result.append(filter)
        for pattern in patterns:
            filter.add_pattern(pattern)
            # Every pattern also feeds the catch-all filter.
            all_supported_types.add_pattern(pattern)
    return result
def get_save_file_filter(type):
    """Build a gtk file filter matching all save patterns of *type*."""
    patterns = [ "*." + s for s in type.savers.keys() ]
    filter = gtk.FileFilter()
    filter.set_name("{0} ({1})".format(type.short_name, ", ".join(patterns)))
    for pattern in patterns:
        filter.add_pattern(pattern)
    return filter
# *****************************************************************************
# supported types

# -----------------------------------------------------------------------------
# Tracelog type

t_tracelog = Type("Kaira tracelog", "Tracelog")

def load_kth(filename, app, settings=None):
    """Loader for .kth tracelog files; returns (tracelog, settings).

    Loading is wrapped in app._catch_io_error so I/O failures are reported
    through the application instead of propagating.
    """
    def load_tracelog():
        tracelog = TraceLog(filename, True)
        # Mismatched receives indicate an inconsistent trace; warn the user.
        if tracelog.missed_receives > 0:
            app.console_write(
                "{1} mismatched receives were found in tracelog {0}.\n" \
                .format(filename, tracelog.missed_receives),
                "warn")
        return tracelog
    return (app._catch_io_error(load_tracelog), settings)

t_tracelog.register_load_function("kth", load_kth)

def tracelog_view(data, app):
    """Visualize a loaded tracelog in a RunView widget."""
    return runview.RunView(app, data)

t_tracelog.get_view = tracelog_view
types_repository.append(t_tracelog)
# -----------------------------------------------------------------------------
# Table type

t_table = Type("Table")

def show_csv_settings_dialog(parent_window):
    """Show a modal dialog asking for CSV format options.

    Returns a tuple (delimiter, quotechar, has_header, has_types), or
    None if the dialog was canceled.
    """
    sw = settingswindow.SettingWidget()
    sw.add_combobox("delimiter",
                    "Delimiter",
                    [("Tab", "\t"), ("Comma", ","),
                     ("Semicolon", ";"), ("Space", " ")],
                    default=1)
    sw.add_combobox("quotechar",
                    "Quote char",
                    [("Single quotes", "\'"), ("Double quotes", "\"")],
                    default=1)
    sw.add_radiobuttons("header",
                        "Header",
                        [("With header", True), ("Without header", False)],
                        default=0,
                        ncols=2)
    sw.add_radiobuttons("types",
                        "Types",
                        [("With types", True), ("Without types", False)],
                        default=0,
                        ncols=2)

    dialog = settingswindow.BasicSettingDialog(sw, "Setting", parent_window)
    dialog.set_size_request(400, 250)
    dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
    dialog.add_button(gtk.STOCK_OK, gtk.RESPONSE_OK, True)

    response = dialog.run()
    if response == gtk.RESPONSE_OK:
        dialog.destroy()
        delimiter = dialog.get_setting("delimiter")
        quotechar = dialog.get_setting("quotechar")
        has_header = dialog.get_setting("header")
        has_types = dialog.get_setting("types")
        return (delimiter, quotechar, has_header, has_types)
    dialog.destroy()
    return None
def load_csv(filename, app, settings):
    """Load a CSV file into a Table.

    Returns (table, settings); (None, None) when the settings dialog is
    canceled.  Empty string cells are normalized to None.
    """
    if settings is None:
        settings = show_csv_settings_dialog(app.window)
        if settings is None:
            return (None, None) # settings was canceled

    delimiter, quotechar, has_header, has_types = settings
    with open(filename, "rb") as csvfile:
        csvreader = csv.reader(
            csvfile, delimiter=delimiter, quotechar=quotechar)

        try:
            types = None
            if has_types:
                types = csvreader.next()
            header = None
            if has_header:
                header = csvreader.next()
            row = csvreader.next() # first row with data
        except StopIteration:
            # File exhausted before any data row: return an empty
            # single-column table.
            table = Table([("V0", "object")], 0)
            return (table, None);

        # Fall back to generic types / generated column names ("V 1", ...).
        if types is None:
            types = ["object"] * len(row)
        if header is None:
            header = ["V {0}".format(i + 1) for i in range(len(row))]

        cols_description = zip(header, types)
        table = Table(cols_description, 100)

        row = [None if value == '' else value for value in row]
        table.add_row(row) # add the first loaded row with data
        for row in csvreader:
            row = [None if value == '' else value for value in row]
            table.add_row(row);
        table.trim()
    return (table, settings)

t_table.register_load_function("csv", load_csv)
def store_csv(table, filename, app, settings):
    """Store *table* into a CSV file.

    Returns (True, settings) on success; (False, None) when the settings
    dialog is canceled.
    """
    if settings is None:
        settings = show_csv_settings_dialog(app.window)
        if settings is None:
            return (False, None)

    delimiter, quotechar, has_header, has_types = settings
    with open(filename, "w") as csvfile:
        csvwriter = csv.writer(
            csvfile, delimiter=delimiter, quotechar=quotechar)
        # Optional preamble rows mirror what load_csv() expects to read.
        if has_types:
            csvwriter.writerow(table.types)
        if has_header:
            csvwriter.writerow(table.header)
        for row in table:
            csvwriter.writerow(row)
    return (True, settings)

t_table.register_store_function("csv", store_csv)
def csv_view(table, app):
    """Build a gtk list widget showing *table*; warn about malformed rows."""
    colnames = [(title, str) for title in table.header]
    view = gtkutils.SimpleList(colnames)
    idx = 1
    for row in table:
        try:
            view.append(row)
            idx += 1
        except ValueError:
            # Row length does not match the header; report and skip it.
            # NOTE(review): the message wording is garbled ("row is index
            # is") -- runtime string left unchanged here.
            msg = ("Row sequence has wrong length. It must have {0} items"
                   " instead of {1}.\nThe problem row is index is {2}.".
                   format(len(table.header), len(row), idx))
            app.show_message_dialog(msg, gtk.MESSAGE_WARNING)
    return view

t_table.get_view = csv_view
types_repository.append(t_table)
| gpl-3.0 |
hyperized/ansible | lib/ansible/modules/network/radware/vdirect_runnable.py | 20 | 12919 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Radware LTD.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
module: vdirect_runnable
author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk)
short_description: Runs templates and workflow actions in Radware vDirect server
description:
- Runs configuration templates, creates workflows and runs workflow actions in Radware vDirect server.
notes:
- Requires the Radware vdirect-client Python package on the host. This is as easy as
C(pip install vdirect-client)
version_added: "2.5"
options:
vdirect_ip:
description:
- Primary vDirect server IP address, may be set as C(VDIRECT_IP) environment variable.
required: true
vdirect_user:
description:
- vDirect server username, may be set as C(VDIRECT_USER) environment variable.
required: true
vdirect_password:
description:
- vDirect server password, may be set as C(VDIRECT_PASSWORD) environment variable.
required: true
vdirect_secondary_ip:
description:
- Secondary vDirect server IP address, may be set as C(VDIRECT_SECONDARY_IP) environment variable.
vdirect_wait:
description:
- Wait for async operation to complete, may be set as C(VDIRECT_WAIT) environment variable.
type: bool
default: 'yes'
vdirect_https_port:
description:
- vDirect server HTTPS port number, may be set as C(VDIRECT_HTTPS_PORT) environment variable.
default: 2189
vdirect_http_port:
description:
- vDirect server HTTP port number, may be set as C(VDIRECT_HTTP_PORT) environment variable.
default: 2188
vdirect_timeout:
description:
- Amount of time to wait for async operation completion [seconds],
- may be set as C(VDIRECT_TIMEOUT) environment variable.
default: 60
vdirect_use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection,
- may be set as C(VDIRECT_HTTPS) or C(VDIRECT_USE_SSL) environment variable.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated,
- may be set as C(VDIRECT_VALIDATE_CERTS) or C(VDIRECT_VERIFY) environment variable.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
aliases: [ vdirect_validate_certs ]
runnable_type:
description:
- vDirect runnable type.
required: true
choices: ['ConfigurationTemplate', 'Workflow', 'WorkflowTemplate']
runnable_name:
description:
- vDirect runnable name to run.
- May be configuration template name, workflow template name or workflow instance name.
required: true
action_name:
description:
- Workflow action name to run.
- Required if I(runnable_type=Workflow).
parameters:
description:
- Action parameters dictionary. In case of C(ConfigurationTemplate) runnable type,
- the device connection details should always be passed as a parameter.
requirements:
- "vdirect-client >= 4.1.1"
'''
EXAMPLES = '''
- name: vdirect_runnable
vdirect_runnable:
vdirect_ip: 10.10.10.10
vdirect_user: vDirect
vdirect_password: radware
runnable_type: ConfigurationTemplate
runnable_name: get_vlans
parameters: {'vlans_needed':1,'adc':[{'type':'Adc','name':'adc-1'}]}
'''
RETURN = '''
result:
description: Message detailing run result
returned: success
type: str
sample: "Workflow action run completed."
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from vdirect_client import rest_client
HAS_REST_CLIENT = True
except ImportError:
HAS_REST_CLIENT = False
# Runnable type identifiers accepted by the module's runnable_type option.
CONFIGURATION_TEMPLATE_RUNNABLE_TYPE = 'ConfigurationTemplate'
WORKFLOW_TEMPLATE_RUNNABLE_TYPE = 'WorkflowTemplate'
WORKFLOW_RUNNABLE_TYPE = 'Workflow'

# Result messages reported back to Ansible on success.
TEMPLATE_SUCCESS = 'Configuration template run completed.'
WORKFLOW_CREATION_SUCCESS = 'Workflow created.'
WORKFLOW_ACTION_SUCCESS = 'Workflow action run completed.'
# Ansible argument spec for the module.  Connection options fall back to
# VDIRECT_* environment variables when not supplied explicitly.
meta_args = dict(
    vdirect_ip=dict(required=True, fallback=(env_fallback, ['VDIRECT_IP'])),
    vdirect_user=dict(required=True, fallback=(env_fallback, ['VDIRECT_USER'])),
    vdirect_password=dict(
        required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']),
        no_log=True, type='str'),
    vdirect_secondary_ip=dict(
        required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']),
        default=None),
    vdirect_use_ssl=dict(
        required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']),
        default=True, type='bool'),
    vdirect_wait=dict(
        required=False, fallback=(env_fallback, ['VDIRECT_WAIT']),
        default=True, type='bool'),
    vdirect_timeout=dict(
        required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']),
        default=60, type='int'),
    validate_certs=dict(
        required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']),
        default=True, type='bool', aliases=['vdirect_validate_certs']),
    vdirect_https_port=dict(
        required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']),
        default=2189, type='int'),
    vdirect_http_port=dict(
        required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']),
        default=2188, type='int'),
    runnable_type=dict(
        required=True,
        choices=[CONFIGURATION_TEMPLATE_RUNNABLE_TYPE, WORKFLOW_TEMPLATE_RUNNABLE_TYPE, WORKFLOW_RUNNABLE_TYPE]),
    runnable_name=dict(required=True),
    # action_name is only meaningful for the Workflow runnable type.
    action_name=dict(required=False, default=None),
    parameters=dict(required=False, type='dict', default={})
)
class RunnableException(Exception):
    """Base error for vDirect runnable failures.

    Carries a short *reason* plus free-form *details* text.
    """
    def __init__(self, reason, details):
        self.reason = reason
        self.details = details

    def __str__(self):
        return 'Reason: %s. Details:%s.' % (self.reason, self.details)
class WrongActionNameException(RunnableException):
    """Raised when the requested action is not offered by the runnable."""

    def __init__(self, action, available_actions):
        reason = 'Wrong action name ' + repr(action)
        details = 'Available actions are: ' + repr(available_actions)
        super(WrongActionNameException, self).__init__(reason, details)
class MissingActionParametersException(RunnableException):
    """Raised when mandatory action parameters were not supplied."""

    def __init__(self, required_parameters):
        details = 'Required parameters are: ' + repr(required_parameters)
        super(MissingActionParametersException, self).__init__(
            'Action parameters missing', details)
class MissingRunnableException(RunnableException):
    """Raised when the named runnable does not exist on the vDirect server."""

    def __init__(self, name):
        details = 'Runnable ' + name + ' is missing'
        super(MissingRunnableException, self).__init__('Runnable missing', details)
class VdirectRunnable(object):
    """Executes a vDirect runnable (configuration template, workflow template
    or workflow action) through the vdirect-client REST API."""
    # Implicit action names used for the two template runnable types.
    CREATE_WORKFLOW_ACTION = 'createWorkflow'
    RUN_ACTION = 'run'
    def __init__(self, params):
        """Build the REST client from the already-validated module params."""
        self.client = rest_client.RestClient(params['vdirect_ip'],
                                             params['vdirect_user'],
                                             params['vdirect_password'],
                                             wait=params['vdirect_wait'],
                                             secondary_vdirect_ip=params['vdirect_secondary_ip'],
                                             https_port=params['vdirect_https_port'],
                                             http_port=params['vdirect_http_port'],
                                             timeout=params['vdirect_timeout'],
                                             https=params['vdirect_use_ssl'],
                                             verify=params['validate_certs'])
        self.params = params
        self.type = self.params['runnable_type']
        self.name = self.params['runnable_name']
        if 'parameters' in self.params:
            self.action_params = self.params['parameters']
        else:
            # NOTE(review): falls back to a list although 'parameters' is
            # declared type='dict' with default {} in meta_args, so this
            # branch looks unreachable via the module; confirm before relying
            # on it elsewhere.
            self.action_params = []
    def _validate_runnable_exists(self):
        """Raise MissingRunnableException unless the server knows the runnable."""
        res = self.client.runnable.get_runnable_objects(self.type)
        runnable_names = res[rest_client.RESP_DATA]['names']
        if self.name not in runnable_names:
            raise MissingRunnableException(self.name)
    def _validate_action_name(self):
        """Resolve the action to invoke and check that the server offers it."""
        # Template types have fixed implicit actions; only a plain 'Workflow'
        # uses the user-supplied 'action_name'.
        if self.type == WORKFLOW_TEMPLATE_RUNNABLE_TYPE:
            self.action_name = VdirectRunnable.CREATE_WORKFLOW_ACTION
        elif self.type == CONFIGURATION_TEMPLATE_RUNNABLE_TYPE:
            self.action_name = VdirectRunnable.RUN_ACTION
        else:
            self.action_name = self.params['action_name']
        res = self.client.runnable.get_available_actions(self.type, self.name)
        available_actions = res[rest_client.RESP_DATA]['names']
        if self.action_name not in available_actions:
            raise WrongActionNameException(self.action_name, available_actions)
    def _validate_required_action_params(self):
        """Check every required action parameter was supplied by the user."""
        action_params_names = [n for n in self.action_params]
        res = self.client.runnable.get_action_info(self.type, self.name, self.action_name)
        if 'parameters' in res[rest_client.RESP_DATA]:
            action_params_spec = res[rest_client.RESP_DATA]['parameters']
        else:
            action_params_spec = []
        # Device-typed parameters and any parameter that is not purely an
        # output ('direction' != 'out') are treated as mandatory.
        required_action_params_dict = [{'name': p['name'], 'type': p['type']} for p in action_params_spec
                                       if p['type'] == 'alteon' or
                                       p['type'] == 'defensePro' or
                                       p['type'] == 'appWall' or
                                       p['direction'] != 'out']
        required_action_params_names = [n['name'] for n in required_action_params_dict]
        if set(required_action_params_names) & set(action_params_names) != set(required_action_params_names):
            raise MissingActionParametersException(required_action_params_dict)
    def run(self):
        """Validate then execute the runnable.

        Returns a dict with a human-readable 'msg' (and optional output
        'parameters'); raises RunnableException on any server-side failure.
        """
        self._validate_runnable_exists()
        self._validate_action_name()
        self._validate_required_action_params()
        data = self.action_params
        result = self.client.runnable.run(data, self.type, self.name, self.action_name)
        result_to_return = {'msg': ''}
        if result[rest_client.RESP_STATUS] == 200:
            if result[rest_client.RESP_DATA]['success']:
                if self.type == WORKFLOW_TEMPLATE_RUNNABLE_TYPE:
                    result_to_return['msg'] = WORKFLOW_CREATION_SUCCESS
                elif self.type == CONFIGURATION_TEMPLATE_RUNNABLE_TYPE:
                    result_to_return['msg'] = TEMPLATE_SUCCESS
                else:
                    result_to_return['msg'] = WORKFLOW_ACTION_SUCCESS
                # Propagate any output parameters the runnable produced.
                if 'parameters' in result[rest_client.RESP_DATA]:
                    result_to_return['parameters'] = result[rest_client.RESP_DATA]['parameters']
            else:
                # HTTP 200 but the runnable itself failed; prefer the server's
                # exception message when one is present.
                if 'exception' in result[rest_client.RESP_DATA]:
                    raise RunnableException(result[rest_client.RESP_DATA]['exception']['message'],
                                            result[rest_client.RESP_STR])
                else:
                    raise RunnableException('The status returned ' + str(result[rest_client.RESP_DATA]['status']),
                                            result[rest_client.RESP_STR])
        else:
            raise RunnableException(result[rest_client.RESP_REASON],
                                    result[rest_client.RESP_STR])
        return result_to_return
def main():
    """Ansible entry point: parse args, run the runnable, report the result."""
    module = AnsibleModule(argument_spec=meta_args,
                           required_if=[['runnable_type', WORKFLOW_RUNNABLE_TYPE, ['action_name']]])
    if not HAS_REST_CLIENT:
        module.fail_json(msg="The python vdirect-client module is required")
    try:
        vdirect_runnable = VdirectRunnable(module.params)
        result = vdirect_runnable.run()
        result = dict(result=result)
        module.exit_json(**result)
    except Exception as e:
        # Any validation or REST failure is surfaced as a module failure.
        module.fail_json(msg=str(e))
if __name__ == '__main__':
    main()
| gpl-3.0 |
yufengg/tensorflow | tensorflow/python/framework/graph_util_test.py | 36 | 15243 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_ops as math_ops_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device
class DeviceFunctionsTest(test.TestCase):
  """Tests for device-placement functions and graph_util GraphDef rewrites."""
  def testTwoDeviceFunctions(self):
    """A device function applies only inside its g.device() scope."""
    with ops.Graph().as_default() as g:
      var_0 = gen_state_ops._variable(
          shape=[1],
          dtype=dtypes.float32,
          name="var_0",
          container="",
          shared_name="")
      with g.device(test_device_func_pin_variable_to_cpu):
        var_1 = gen_state_ops._variable(
            shape=[1],
            dtype=dtypes.float32,
            name="var_1",
            container="",
            shared_name="")
      var_2 = gen_state_ops._variable(
          shape=[1],
          dtype=dtypes.float32,
          name="var_2",
          container="",
          shared_name="")
      var_3 = gen_state_ops._variable(
          shape=[1],
          dtype=dtypes.float32,
          name="var_3",
          container="",
          shared_name="")
      with g.device(test_device_func_pin_variable_to_cpu):
        var_4 = gen_state_ops._variable(
            shape=[1],
            dtype=dtypes.float32,
            name="var_4",
            container="",
            shared_name="")
        with g.device("/device:GPU:0"):
          var_5 = gen_state_ops._variable(
              shape=[1],
              dtype=dtypes.float32,
              name="var_5",
              container="",
              shared_name="")
        var_6 = gen_state_ops._variable(
            shape=[1],
            dtype=dtypes.float32,
            name="var_6",
            container="",
            shared_name="")
    # Only variables created under the pinning function get "/device:CPU:0";
    # an explicit inner device string (var_5) wins over the function.
    self.assertDeviceEqual(var_0.device, None)
    self.assertDeviceEqual(var_1.device, "/device:CPU:0")
    self.assertDeviceEqual(var_2.device, None)
    self.assertDeviceEqual(var_3.device, None)
    self.assertDeviceEqual(var_4.device, "/device:CPU:0")
    self.assertDeviceEqual(var_5.device, "/device:GPU:0")
    self.assertDeviceEqual(var_6.device, "/device:CPU:0")
  def testNestedDeviceFunctions(self):
    """Nested device scopes: innermost applicable assignment wins."""
    with ops.Graph().as_default():
      var_0 = variables.Variable(0)
      with ops.device(test_device_func_pin_variable_to_cpu):
        var_1 = variables.Variable(1)
        with ops.device(lambda op: "/gpu:0"):
          var_2 = variables.Variable(2)
          with ops.device("/gpu:0"):  # Implicit merging device function.
            var_3 = variables.Variable(3)
    self.assertDeviceEqual(var_0.device, None)
    self.assertDeviceEqual(var_1.device, "/device:CPU:0")
    self.assertDeviceEqual(var_2.device, "/device:GPU:0")
    self.assertDeviceEqual(var_3.device, "/device:GPU:0")
  def testExplicitDevice(self):
    """Constants pick up exactly the device string of their scope."""
    with ops.Graph().as_default() as g:
      const_0 = constant_op.constant(5.0)
      with g.device("/device:GPU:0"):
        const_1 = constant_op.constant(5.0)
      with g.device("/device:GPU:1"):
        const_2 = constant_op.constant(5.0)
      with g.device("/device:CPU:0"):
        const_3 = constant_op.constant(5.0)
      with g.device("/device:CPU:1"):
        const_4 = constant_op.constant(5.0)
      with g.device("/job:ps"):
        const_5 = constant_op.constant(5.0)
    self.assertDeviceEqual(const_0.device, None)
    self.assertDeviceEqual(const_1.device, "/device:GPU:0")
    self.assertDeviceEqual(const_2.device, "/device:GPU:1")
    self.assertDeviceEqual(const_3.device, "/device:CPU:0")
    self.assertDeviceEqual(const_4.device, "/device:CPU:1")
    self.assertDeviceEqual(const_5.device, "/job:ps")
  def testDefaultDevice(self):
    """A graph-wide device function leaves non-Variable ops untouched."""
    with ops.Graph().as_default() as g, g.device(
        test_device_func_pin_variable_to_cpu):
      with g.device("/job:ps"):
        const_0 = constant_op.constant(5.0)
      with g.device("/device:GPU:0"):
        const_1 = constant_op.constant(5.0)
      with g.device("/device:GPU:1"):
        const_2 = constant_op.constant(5.0)
      with g.device("/device:CPU:0"):
        const_3 = constant_op.constant(5.0)
      with g.device("/device:CPU:1"):
        const_4 = constant_op.constant(5.0)
      with g.device("/replica:0"):
        const_5 = constant_op.constant(5.0)
    self.assertDeviceEqual(const_0.device, "/job:ps")
    self.assertDeviceEqual(const_1.device, "/device:GPU:0")
    self.assertDeviceEqual(const_2.device, "/device:GPU:1")
    self.assertDeviceEqual(const_3.device, "/device:CPU:0")
    self.assertDeviceEqual(const_4.device, "/device:CPU:1")
    self.assertDeviceEqual(const_5.device, "/replica:0")
  def testExtractSubGraph(self):
    """extract_sub_graph keeps only nodes reachable from the dest nodes."""
    graph_def = graph_pb2.GraphDef()
    n1 = graph_def.node.add()
    n1.name = "n1"
    n1.input.extend(["n5"])
    n2 = graph_def.node.add()
    n2.name = "n2"
    # Take the first output of the n1 node as the input.
    n2.input.extend(["n1:0"])
    n3 = graph_def.node.add()
    n3.name = "n3"
    # Add a control input (which isn't really needed by the kernel, but
    # rather to enforce execution order between nodes).
    n3.input.extend(["^n2"])
    n4 = graph_def.node.add()
    n4.name = "n4"
    # It is fine to have a loops in the graph as well.
    n5 = graph_def.node.add()
    n5.name = "n5"
    n5.input.extend(["n1"])
    sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
    # n4 is unreachable from n3 and must be dropped.
    self.assertEqual("n1", sub_graph.node[0].name)
    self.assertEqual("n2", sub_graph.node[1].name)
    self.assertEqual("n3", sub_graph.node[2].name)
    self.assertEqual("n5", sub_graph.node[3].name)
  def testConvertVariablesToConstsWithFunctions(self):
    """convert_variables_to_constants preserves the function library."""
    @function.Defun(dtypes.float32)
    def plus_one(x):
      return x + 1.0
    with ops.Graph().as_default():
      variable_node = variables.Variable(1.0, name="variable_node")
      _ = variables.Variable(1.0, name="unused_variable_node")
      defun_node = plus_one(variable_node)
      output_node = math_ops_lib.multiply(
          defun_node, 2.0, name="output_node")
      with session.Session() as sess:
        init = variables.initialize_variables([variable_node])
        sess.run(init)
        output = sess.run(output_node)
        self.assertNear(4.0, output, 0.00001)
        variable_graph_def = sess.graph.as_graph_def()
        # First get the constant_graph_def when variable_names_whitelist is set,
        # note that if variable_names_whitelist is not set an error will be
        # thrown because unused_variable_node is not initialized.
        constant_graph_def = graph_util.convert_variables_to_constants(
            sess,
            variable_graph_def, ["output_node"],
            variable_names_whitelist=set(["variable_node"]))
        self.assertEqual(variable_graph_def.library,
                         constant_graph_def.library)
  def testConvertVariablesToConsts(self):
    """Variables are folded to Consts; blacklisted ones are kept as-is."""
    with ops.Graph().as_default():
      variable_node = variables.Variable(1.0, name="variable_node")
      _ = variables.Variable(1.0, name="unused_variable_node")
      output_node = math_ops_lib.multiply(
          variable_node, 2.0, name="output_node")
      with session.Session() as sess:
        init = variables.initialize_variables([variable_node])
        sess.run(init)
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
        variable_graph_def = sess.graph.as_graph_def()
        # First get the constant_graph_def when variable_names_whitelist is set,
        # note that if variable_names_whitelist is not set an error will be
        # thrown because unused_variable_node is not initialized.
        constant_graph_def = graph_util.convert_variables_to_constants(
            sess,
            variable_graph_def, ["output_node"],
            variable_names_whitelist=set(["variable_node"]))
        # Then initialize the unused variable, and get another
        # constant_graph_def when variable_names_whitelist is not set.
        sess.run(variables.global_variables_initializer())
        constant_graph_def_without_variable_whitelist = (
            graph_util.convert_variables_to_constants(sess, variable_graph_def,
                                                      ["output_node"]))
        # The unused variable should be cleared so the two graphs should be
        # equivalent.
        self.assertEqual(
            str(constant_graph_def),
            str(constant_graph_def_without_variable_whitelist))
        # Test variable name black list. This should result in the variable not
        # being a const.
        sess.run(variables.global_variables_initializer())
        constant_graph_def_with_blacklist = (
            graph_util.convert_variables_to_constants(
                sess,
                variable_graph_def, ["output_node"],
                variable_names_blacklist=set(["variable_node"])))
        variable_node = None
        for node in constant_graph_def_with_blacklist.node:
          if node.name == "variable_node":
            variable_node = node
        self.assertIsNotNone(variable_node)
        self.assertEqual(variable_node.op, "VariableV2")
    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      _ = importer.import_graph_def(constant_graph_def, name="")
      self.assertEqual(4, len(constant_graph_def.node))
      for node in constant_graph_def.node:
        self.assertNotEqual("Variable", node.op)
        self.assertNotEqual("VariableV2", node.op)
      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
  def create_node_def(self, op, name, inputs):
    """Build a NodeDef with the given op type, name and input list."""
    new_node = node_def_pb2.NodeDef()
    new_node.op = op
    new_node.name = name
    for input_name in inputs:
      new_node.input.extend([input_name])
    return new_node
  def create_constant_node_def(self, name, value, dtype, shape=None):
    """Build a Const NodeDef holding `value` with the given dtype/shape."""
    node = self.create_node_def("Const", name, [])
    self.set_attr_dtype(node, "dtype", dtype)
    self.set_attr_tensor(node, "value", value, dtype, shape)
    return node
  def set_attr_dtype(self, node, key, value):
    """Attach a DType attribute to `node` under `key`."""
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(type=value.as_datatype_enum))
  def set_attr_tensor(self, node, key, value, dtype, shape=None):
    """Attach a TensorProto attribute to `node` under `key`."""
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            value, dtype=dtype, shape=shape)))
  def testRemoveTrainingNodes(self):
    """CheckNumerics and pass-through Identity nodes are stripped."""
    a_constant_name = "a_constant"
    b_constant_name = "b_constant"
    a_check_name = "a_check"
    b_check_name = "b_check"
    a_identity_name = "a_identity"
    b_identity_name = "b_identity"
    add_name = "add"
    graph_def = graph_pb2.GraphDef()
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([a_constant])
    a_check_node = self.create_node_def("CheckNumerics", a_check_name,
                                        [a_constant_name])
    graph_def.node.extend([a_check_node])
    a_identity_node = self.create_node_def(
        "Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
    graph_def.node.extend([a_identity_node])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    graph_def.node.extend([b_constant])
    b_check_node = self.create_node_def("CheckNumerics", b_check_name,
                                        [b_constant_name])
    graph_def.node.extend([b_check_node])
    b_identity_node = self.create_node_def(
        "Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
    graph_def.node.extend([b_identity_node])
    add_node = self.create_node_def("Add", add_name,
                                    [a_identity_name, b_identity_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    graph_def.node.extend([add_node])
    # Expected result: Add wired straight to the two constants.
    expected_output = graph_pb2.GraphDef()
    a_constant = self.create_constant_node_def(
        a_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([a_constant])
    b_constant = self.create_constant_node_def(
        b_constant_name, value=1, dtype=dtypes.float32, shape=[])
    expected_output.node.extend([b_constant])
    add_node = self.create_node_def("Add", add_name,
                                    [a_constant_name, b_constant_name])
    self.set_attr_dtype(add_node, "T", dtypes.float32)
    expected_output.node.extend([add_node])
    output = graph_util.remove_training_nodes(graph_def)
    self.assertProtoEquals(expected_output, output)
  def testRemoveIdentityChains(self):
    """Check that chains of Identity nodes are correctly pruned.
    Create a chain of four nodes, A, B, C, and D where A inputs B, B inputs C,
    and C inputs D. Nodes B and C are "Identity" and should be pruned, resulting
    in the nodes A and D, where A inputs D.
    """
    graph_def = graph_pb2.GraphDef()
    graph_def.node.extend([
        self.create_node_def("Aop", "A", ["B"]), self.create_node_def(
            "Identity", "B", ["C"]), self.create_node_def(
                "Identity", "C", ["D"]), self.create_node_def("Dop", "D", [])
    ])
    expected_graph_def = graph_pb2.GraphDef()
    expected_graph_def.node.extend([
        self.create_node_def("Aop", "A", ["D"]), self.create_node_def(
            "Dop", "D", [])
    ])
    self.assertProtoEquals(expected_graph_def,
                           graph_util.remove_training_nodes(graph_def))
if __name__ == "__main__":
  test.main()
| apache-2.0 |
WatanabeYasumasa/edx-platform | cms/djangoapps/contentstore/management/commands/course_id_clash.py | 29 | 1940 | """
Script for finding all courses whose org/name pairs == other courses when ignoring case
"""
from django.core.management.base import BaseCommand
from xmodule.modulestore.django import modulestore
#
# To run from command line: ./manage.py cms --settings dev course_id_clash
#
class Command(BaseCommand):
    """
    Script for finding all courses whose org/name pairs == other courses when ignoring case
    """
    help = 'List all courses ids which may collide when ignoring case'
    def handle(self, *args, **options):
        """Run a Mongo map-reduce grouping courses by lower-cased org+course
        and print every group that contains more than one course id."""
        mstore = modulestore()
        # Only the Mongo-backed modulestore exposes a raw pymongo collection.
        if hasattr(mstore, 'collection'):
            # Map: key = lowercased org+course, value = the original course id.
            map_fn = '''
                function () {
                    emit(this._id.org.toLowerCase()+this._id.course.toLowerCase(), {target: this._id});
                }
                '''
            # Reduce: collect all clashing course ids for one key.
            reduce_fn = '''
                function (idpair, matches) {
                    var result = {target: []};
                    matches.forEach(function (match) {
                        result.target.push(match.target);
                    });
                    return result;
                }
                '''
            # Finalize: keys that were never reduced (single course, so
            # reduced.target is not an array) are nulled out.
            finalize = '''
                function(key, reduced) {
                    if (Array.isArray(reduced.target)) {
                        return reduced;
                    }
                    else {return null;}
                }
                '''
            results = mstore.collection.map_reduce(
                map_fn, reduce_fn, {'inline': True}, query={'_id.category': 'course'}, finalize=finalize
            )
            results = results.get('results')
            for entry in results:
                # Only real clashes survive finalize(); singletons are None.
                if entry.get('value') is not None:
                    print '{:-^40}'.format(entry.get('_id'))
                    for course_id in entry.get('value').get('target'):
                        print '   {}/{}/{}'.format(course_id.get('org'), course_id.get('course'), course_id.get('name'))
| agpl-3.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/compiler/tests/stack_ops_test.py | 10 | 4361 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.platform import test
class StackOpTest(XLATestCase):
  """XLA compilation tests for the v2 stack ops (push / pop / close)."""
  def testStackPushPop(self):
    """A pushed value is returned unchanged by the following pop."""
    with self.test_session(), self.test_scope():
      size = array_ops.placeholder(dtypes.int32)
      v = array_ops.placeholder(dtypes.float32)
      h = gen_data_flow_ops.stack_v2(size, dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops.stack_push_v2(h, v)
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
      self.assertAllClose([[4.0, 5.0]], c1.eval({size: 5, v: [[4.0, 5.0]]}))
  def testStackPushPopSwap(self):
    """Push/pop round-trips a large tensor with swap_memory enabled."""
    with self.test_session(), self.test_scope():
      a = np.arange(2000)
      x = array_ops.placeholder(dtypes.float32)
      h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
      self.assertAllClose(a, c1.eval({x: a}))
  def testMultiStack(self):
    """Two differently-named stacks operate independently."""
    with self.test_session(), self.test_scope():
      v = array_ops.placeholder(dtypes.float32)
      h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops.stack_push_v2(h1, v)
      with ops.control_dependencies([c1]):
        c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
      h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="bar")
      c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
      with ops.control_dependencies([c2]):
        c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
      r = c1 + c2
      self.assertAllClose(9.0, r.eval({v: 4.0}))
  def testSameNameStacks(self):
    """Different stacks with the same name do not interfere."""
    with self.test_session() as sess, self.test_scope():
      v1 = array_ops.placeholder(dtypes.float32)
      v2 = array_ops.placeholder(dtypes.float32)
      h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops.stack_push_v2(h1, v1)
      with ops.control_dependencies([c1]):
        c2 = gen_data_flow_ops.stack_push_v2(h2, v2)
      with ops.control_dependencies([c2]):
        pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
        pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
      out1, out2 = sess.run([pop1, pop2], {v1: 4.0, v2: 5.0})
      self.assertAllClose(out1, 4.0)
      self.assertAllClose(out2, 5.0)
  def testCloseStack(self):
    """Closing a fresh stack succeeds."""
    with self.test_session() as sess, self.test_scope():
      size = array_ops.placeholder(dtypes.int32)
      h = gen_data_flow_ops.stack_v2(size, dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops.stack_close_v2(h)
      sess.run(c1, {size: 5})
  def testPushCloseStack(self):
    """Closing after a push succeeds."""
    with self.test_session() as sess, self.test_scope():
      v = array_ops.placeholder(dtypes.float32)
      h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops.stack_push_v2(h, v)
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops.stack_close_v2(h)
      sess.run(c1, {v: [[4.0, 5.0]]})
if __name__ == "__main__":
  test.main()
| apache-2.0 |
Nizametdinov/cnn-pos-tagger | server.py | 1 | 2226 | from aiohttp import web
import json
import os
from model import *
from data_reader import DataReader
from vocab import Vocab
from tensor_generator import TensorGenerator
from download_data import OPEN_CORPORA_DEST_FILE
def init():
    """Load corpus + vocabulary, build the tagger graph, and stash everything
    the request handler needs in module-level globals."""
    global loader, vocab, tensor_generator
    loader = DataReader(OPEN_CORPORA_DEST_FILE)
    loader.load()
    vocab = Vocab(loader)
    vocab.load()
    tensor_generator = TensorGenerator(loader, vocab)
    # Build the inference graph once; `predictions` is rebound below by loss().
    global input_, predictions, dropout
    input_, logits, dropout = model(
        max_words_in_sentence=tensor_generator.max_sentence_length,
        max_word_length=tensor_generator.max_word_length,
        char_vocab_size=vocab.char_vocab_size(),
        num_output_classes=vocab.part_vocab_size()
    )
    # Only `predictions` is used at serving time; the loss/accuracy tensors
    # are discarded (underscore-prefixed).
    _targets, _target_mask, _loss_, _accuracy, predictions = loss(
        logits=logits,
        batch_size=0,
        max_words_in_sentence=tensor_generator.max_sentence_length
    )
def split_sentence(sentence_string):
    """Tokenize a sentence on single spaces.

    Note: splits on the literal space character, so consecutive spaces
    produce empty tokens (same as str.split(" ")).
    """
    tokens = sentence_string.split(" ")
    return tokens
def calculate_sentence_pos(sentence):
    """Tag one tokenized sentence; returns [[word, part_of_speech], ...].

    NOTE(review): opens a session and restores the checkpoint on every call,
    which is expensive -- consider keeping a session alive from init().
    """
    with tf.Session() as session:
        restore_model(session)
        input_tensors = tensor_generator.tensor_from_sentences([sentence])
        # dropout is disabled (0.0) for inference.
        predicted = session.run([predictions], {input_: input_tensors, dropout: 0.0})
        # First [0]: the single fetched tensor; second [0]: the only sentence.
        sentence_prediction = predicted[0][0]
        result = [[word, vocab.index_to_speech_part_human(word_prediction)] for word, word_prediction in zip(sentence, sentence_prediction)]
        return result
async def calculator(request):
    """HTTP handler: tag the 'sentence' query parameter and return JSON.

    Responds 404 when the parameter is missing/empty and 422 when the
    sentence exceeds the model's maximum length.
    """
    sentence_string = request.rel_url.query.get('sentence')
    if not sentence_string:
        return web.Response(status=404, text='sentence not specified')
    sentence = split_sentence(sentence_string)
    if(len(sentence) > tensor_generator.max_sentence_length):
        return web.Response(status=422, text='sentence is too long')
    result = calculate_sentence_pos(sentence)
    return web.json_response({'result': result})
##############
## Server
##############
# Build the model/vocab globals before accepting any request.
init()
app = web.Application()
# Catch-all route: every GET is handled by calculator().
app.router.add_route('GET', '/{tail:.*}', calculator)
# NOTE(review): if re-enabled, the line below raises KeyError when
# SERVER_PORT is unset -- it should use os.environ.get('SERVER_PORT').
# server_port = int(os.environ['SERVER_PORT']) if os.environ['SERVER_PORT'] else 3000
server_port = 8084
web.run_app(app, port=server_port) | mit |
HackerTool/vivisect | envi/expression.py | 5 | 2531 | """
Unified expression helpers.
"""
def evaluate(pycode, locals):
    """Evaluate *pycode* with empty globals and *locals* as the namespace.

    SECURITY: this is a thin wrapper over eval(); callers must never pass
    untrusted expression strings.
    """
    return eval(pycode, {}, locals)
class ExpressionLocals(dict):
    """
    An object to act as the locals dictionary for the evaluation
    of envi expressions. You may pass in an envi.symstore.resolver.SymbolResolver
    object to automagically use symbols in your expressions.
    """
    def __init__(self, symobj=None):
        dict.__init__(self)
        self.symobj = symobj

    def __getitem__(self, name):
        # Prefer symbol resolution when a resolver is attached.  Identity
        # comparison ("is not None") instead of "!= None" avoids invoking a
        # symbol object's custom __eq__ and follows PEP 8.
        if self.symobj is not None:
            ret = self.symobj.getSymByName(name)
            if ret is not None:
                return ret
        # Fall back to a plain dict lookup (raises KeyError when absent).
        return dict.__getitem__(self, name)
class MemoryExpressionLocals(ExpressionLocals):
    """Expression locals that additionally expose memory helpers
    (mapbase, maplen, ispoi, mem, poi, sym) backed by a memory object."""

    def __init__(self, memobj, symobj=None):
        ExpressionLocals.__init__(self, symobj=symobj)
        self.memobj = memobj
        # Publish the helper methods as names usable inside expressions.
        helpers = {
            'mapbase': self.mapbase,
            'maplen': self.maplen,
            'ispoi': self.ispoi,
            'mem': self.mem,
            'poi': self.poi,
            'sym': self.sym,
        }
        self.update(helpers)

    def sym(self, symstr):
        '''
        An easy to use utility for symbols which have un-pythonic names.
        Example x = sym('kernel32.??2@$$FYAPAXI@Z')
        '''
        return long(evaluate(symstr, self))

    def mapbase(self, address):
        """
        The expression mapbase(address) returns the base address of the
        memory mapped area containing "address"
        """
        memmap = self.memobj.getMemoryMap(address)
        if not memmap:
            raise Exception("ERROR - un-mapped address in mapbase()")
        return memmap[0]

    def maplen(self, address):
        """
        The expression maplen(address) returns the length of the
        memory mapped area containing "address".
        """
        memmap = self.memobj.getMemoryMap(address)
        if not memmap:
            raise Exception("ERROR - un-mapped address in maplen()")
        return memmap[1]

    def ispoi(self, addr):
        """
        The expression ispoi(value) returns True if the specified value
        is a valid pointer. Otherwise, False.
        """
        return self.memobj.isValidPointer(addr)

    def mem(self, addr, size):
        """
        Read and return memory.
        Example: mem(ecx, 20)
        """
        return self.memobj.readMemory(addr, size)

    def poi(self, address):
        """
        When expression contains "poi(addr)" this will return
        the address pointed to by addr.
        """
        return self.memobj.readMemoryPtr(address)
| apache-2.0 |
darktears/chromium-crosswalk | chrome/common/extensions/docs/server2/redirector_test.py | 44 | 7508 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from compiled_file_system import CompiledFileSystem
from object_store_creator import ObjectStoreCreator
from redirector import Redirector
from test_file_system import TestFileSystem
from third_party.json_schema_compiler.json_parse import Parse
HOST = 'localhost/'
# Fake site tree shared by all tests below.  Each redirects.json maps a
# source path (or a 'dir/...' wildcard) to its redirect target; TestFileSystem
# serves these entries like real files so Redirector rules run end-to-end.
file_system = TestFileSystem({
  'redirects.json': json.dumps({
    'foo/...': 'apps/...',
    '': '/index.html',
    'home': 'index.html',
    'index.html': 'http://something.absolute.com/'
  }),
  'apps': {
    'redirects.json': json.dumps({
      '': '../index.html',
      'index.html': 'about_apps.html',
      'foo.html': '/bar.html',
    })
  },
  'extensions': {
    'redirects.json': json.dumps({
      'manifest': 'manifest.html',
      'tabs': 'tabs.html',
      'dev/...': '...',
      'a/very/long/dir/chain/...': 'short/...',
      '_short/...': 'another/long/chain/...',
      'r1/...': 'r2/r1/...',
      'r2/r1/...': 'r3/...',
      'r3/...': 'r4/...',
      'r5/...': 'r6/...',
      'nofile1/...': 'nofile2/...',
      'noredirects1/...': 'noredirects2/...'
    }),
    'manifest': {
      'redirects.json': json.dumps({
        '': '../manifest.html',
        'more-info': 'http://lmgtfy.com'
      }),
    },
    'stable': {
      'redirects.json': json.dumps({
        'tabs': 'tabs.html'
      }),
      'manifest': {
        'redirects.json': json.dumps({
          'storage': 'storage.html'
        })
      },
    },
    'dev': {
      'redirects.json': json.dumps({
        'tabs': 'tabs.html',
        'manifest': 'manifest.html'
      }),
      'manifest': {
        'redirects.json': json.dumps({
          'storage': 'storage.html'
        })
      }
    },
    'r4': {
      'redirects.json': json.dumps({
        'manifest': 'manifest.html'
      })
    },
    'r6': {
      'redirects.json': json.dumps({
        '...': 'directory/...'
      }),
      'directory': {
        'redirects.json': json.dumps({
          'manifest': 'manifest.html'
        }),
        'manifest': 'manifest.html'
      }
    },
    'short': {
      'redirects.json': json.dumps({
        'index': 'index.html'
      })
    },
    'another': {
      'long': {
        'chain': {
          'redirects.json': json.dumps({
            'index': 'index.html'
          })
        }
      }
    },
    'nofile': {
      'redirects.json': json.dumps({
      })
    }
  },
  'priority': {
    'redirects.json': json.dumps({
      'directory/...': 'GOOD/...'
    }),
    'directory': {
      'redirects.json': json.dumps({
        '...': '../BAD/...'
      }),
    }
  },
  'relative_directory': {
    'redirects.json': json.dumps({
      '...': '../...'
    })
  },
  'infinite_redirect': {
    'redirects.json': json.dumps({
      '...': 'loop/...'
    }),
    'loop': {
      'redirects.json': json.dumps({
        '...': './...'
      })
    }
  },
  'parent_redirect': {
    'redirects.json': json.dumps({
      'a/...': 'b/...'
    })
  }
})
class RedirectorTest(unittest.TestCase):
  """Exercises Redirector against the fake redirects.json tree above."""
  def setUp(self):
    # A fresh Redirector (with a test object store) per test.
    self._redirector = Redirector(
        CompiledFileSystem.Factory(ObjectStoreCreator.ForTest()),
        file_system)
  def testExternalRedirection(self):
    """Absolute http(s) targets are returned verbatim."""
    self.assertEqual(
        'http://something.absolute.com/',
        self._redirector.Redirect(HOST, 'index.html'))
    self.assertEqual(
        'http://lmgtfy.com',
        self._redirector.Redirect(HOST, 'extensions/manifest/more-info'))
  def testAbsoluteRedirection(self):
    """Targets starting with '/' are site-absolute."""
    self.assertEqual(
        '/index.html', self._redirector.Redirect(HOST, ''))
    self.assertEqual(
        '/bar.html', self._redirector.Redirect(HOST, 'apps/foo.html'))
  def testRelativeRedirection(self):
    """Relative targets resolve against the redirects.json's directory."""
    self.assertEqual(
        'apps/about_apps.html',
        self._redirector.Redirect(HOST, 'apps/index.html'))
    self.assertEqual(
        'extensions/manifest.html',
        self._redirector.Redirect(HOST, 'extensions/manifest/'))
    self.assertEqual(
        'extensions/manifest.html',
        self._redirector.Redirect(HOST, 'extensions/manifest'))
    self.assertEqual(
        'index.html', self._redirector.Redirect(HOST, 'apps/'))
    self.assertEqual(
        'index.html', self._redirector.Redirect(HOST, 'home'))
  def testNotFound(self):
    """Paths without a matching rule yield None (no redirect)."""
    self.assertEqual(
        None, self._redirector.Redirect(HOST, 'not/a/real/path'))
    self.assertEqual(
        None, self._redirector.Redirect(HOST, 'public/apps/okay.html'))
  def testOldHosts(self):
    self.assertEqual(
        'https://developer.chrome.com/',
        self._redirector.Redirect('code.google.com', ''))
  def testRefresh(self):
    """Refresh() warms the cache with every redirects.json file."""
    self._redirector.Refresh().Get()
    expected_paths = set([
      'redirects.json',
      'apps/redirects.json',
      'extensions/redirects.json',
      'extensions/manifest/redirects.json'
    ])
    for path in expected_paths:
      self.assertEqual(
          Parse(file_system.ReadSingle(path).Get()),
          # Access the cache's object store to see what files were hit during
          # the cron run. Returns strings parsed as JSON.
          # TODO(jshumway): Make a non hack version of this check.
          self._redirector._cache._file_object_store.Get(
              path).Get().cache_data)
  def testDirectoryRedirection(self):
    """'dir/...' wildcard rules, including multi-hop chains and loops."""
    # Simple redirect.
    self.assertEqual(
        'extensions/manifest.html',
        self._redirector.Redirect(HOST, 'extensions/dev/manifest'))
    # Multiple hops with one file.
    self.assertEqual(
        'extensions/r4/manifest.html',
        self._redirector.Redirect(HOST, 'extensions/r1/manifest'))
    # Multiple hops w/ multiple redirection files.
    self.assertEqual(
        'extensions/r6/directory/manifest.html',
        self._redirector.Redirect(HOST, 'extensions/r5/manifest'))
    # Redirection from root directory redirector.
    self.assertEqual(
        'apps/about_apps.html',
        self._redirector.Redirect(HOST, 'foo/index.html'))
    # Short to long.
    self.assertEqual(
        'extensions/short/index.html',
        self._redirector.Redirect(HOST, 'extensions/a/very/long/dir/chain/index'))
    # Long to short.
    self.assertEqual(
        'extensions/another/long/chain/index.html',
        self._redirector.Redirect(HOST, 'extensions/_short/index'))
    # Directory redirection without a redirects.json in final directory.
    self.assertEqual(
        'extensions/noredirects2/file',
        self._redirector.Redirect(HOST, 'extensions/noredirects1/file'))
    # Directory redirection with redirects.json without rule for the filename.
    self.assertEqual(
        'extensions/nofile2/file',
        self._redirector.Redirect(HOST, 'extensions/nofile1/file'))
    # Relative directory path.
    self.assertEqual(
        'index.html',
        self._redirector.Redirect(HOST, 'relative_directory/home'))
    # Shallower directory redirects have priority.
    self.assertEqual(
        'priority/GOOD/index',
        self._redirector.Redirect(HOST, 'priority/directory/index'))
    # Don't infinitely redirect.
    self.assertEqual('infinite_redirect/loop/index',
                     self._redirector.Redirect(HOST, 'infinite_redirect/index'))
    # If a parent directory is redirected, redirect children properly.
    self.assertEqual('parent_redirect/b/c/index',
                     self._redirector.Redirect(HOST, 'parent_redirect/a/c/index'))
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
dgarros/ansible | lib/ansible/modules/notification/pushover.py | 34 | 3712 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
# All rights reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
###
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pushover
version_added: "2.0"
short_description: Send notifications via U(https://pushover.net)
description:
- Send notifications via pushover, to subscriber list of devices, and email
addresses. Requires pushover app on devices.
notes:
- You will require a pushover.net account to use this module. But no account
is required to receive messages.
options:
msg:
description:
- What message you wish to send.
required: true
app_token:
description:
- Pushover issued token identifying your pushover app.
required: true
user_key:
description:
- Pushover issued authentication key for your user.
required: true
pri:
description:
- Message priority (see U(https://pushover.net) for details.)
required: false
author: "Jim Richardson (@weaselkeeper)"
'''
EXAMPLES = '''
- pushover:
msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic'
app_token: wxfdksl
user_key: baa5fe97f2c5ab3ca8f0bb59
delegate_to: localhost
'''
from ansible.module_utils.six.moves.urllib.parse import urlencode
class Pushover(object):
    '''Small client for the Pushover notification API.'''

    base_uri = 'https://api.pushover.net'

    def __init__(self, module, user, token):
        self.module = module
        self.user = user
        self.token = token

    def run(self, priority, msg):
        '''POST one message to Pushover; return the raw response body.

        Raises Exception carrying fetch_url's info dict on any non-200 status.
        '''
        url = '%s/1/messages.json' % (self.base_uri)

        # Assemble the form-encoded payload the Pushover API expects.
        payload = urlencode(dict(user=self.user,
                                 token=self.token,
                                 priority=priority,
                                 message=msg))
        headers = {"Content-type": "application/x-www-form-urlencoded"}

        r, info = fetch_url(self.module, url, method='POST', data=payload, headers=headers)
        if info['status'] != 200:
            raise Exception(info)
        return r.read()
def main():
    """Ansible module entry point: send a single pushover notification.

    Reads msg/app_token/user_key/pri from the module arguments, posts the
    message, and reports success or failure back to Ansible.
    """
    module = AnsibleModule(
        argument_spec=dict(
            msg=dict(required=True),
            app_token=dict(required=True, no_log=True),
            user_key=dict(required=True, no_log=True),
            pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
        ),
    )

    msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
    try:
        response = msg_object.run(module.params['pri'], module.params['msg'])
    except Exception:
        # Was a bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt; Exception keeps the fail path for real errors
        # while letting interpreter-control exceptions propagate.
        module.fail_json(msg='Unable to send msg via pushover')

    module.exit_json(msg='message sent successfully: %s' % response, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
hamsterbacke23/wagtail | wagtail/wagtaildocs/views/chooser.py | 1 | 4093 | from __future__ import absolute_import, unicode_literals
import json
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, render
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
from wagtail.wagtailadmin.utils import PermissionPolicyChecker
from wagtail.wagtailcore.models import Collection
from wagtail.wagtaildocs.forms import get_document_form
from wagtail.wagtaildocs.models import get_document_model
from wagtail.wagtaildocs.permissions import permission_policy
from wagtail.wagtailsearch.backends import get_search_backends
permission_checker = PermissionPolicyChecker(permission_policy)
def get_document_json(document):
    """
    helper function: given a document, return the json to pass back to the
    chooser panel
    """
    payload = {
        'id': document.id,
        'title': document.title,
        'edit_link': reverse('wagtaildocs:edit', args=(document.id,)),
    }
    return json.dumps(payload)
def chooser(request):
    """Render the document chooser modal.

    Search/pagination/collection-filter requests return just the results
    partial; the initial request returns the full modal workflow response.
    """
    Document = get_document_model()
    if permission_policy.user_has_permission(request.user, 'add'):
        # Only users who may add documents get an upload form in the modal.
        DocumentForm = get_document_form(Document)
        uploadform = DocumentForm()
    else:
        uploadform = None

    documents = []
    q = None
    is_searching = False
    # 'q' = search term, 'p' = page number, 'collection_id' = filter:
    # any of them means this is a results-refresh request, not the first load.
    if 'q' in request.GET or 'p' in request.GET or 'collection_id' in request.GET:
        documents = Document.objects.all()

        collection_id = request.GET.get('collection_id')
        if collection_id:
            documents = documents.filter(collection=collection_id)

        searchform = SearchForm(request.GET)
        if searchform.is_valid():
            q = searchform.cleaned_data['q']

            documents = documents.search(q)
            is_searching = True
        else:
            # No valid query: fall back to newest-first browsing.
            documents = documents.order_by('-created_at')
            is_searching = False

        # Pagination
        paginator, documents = paginate(request, documents, per_page=10)

        # Partial response: only the results list is re-rendered client-side.
        return render(request, "wagtaildocs/chooser/results.html", {
            'documents': documents,
            'query_string': q,
            'is_searching': is_searching,
        })
    else:
        searchform = SearchForm()

        collections = Collection.objects.all()
        # Hide the collection dropdown when there is nothing to choose between.
        if len(collections) < 2:
            collections = None

        documents = Document.objects.order_by('-created_at')
        paginator, documents = paginate(request, documents, per_page=10)

        return render_modal_workflow(request, 'wagtaildocs/chooser/chooser.html', 'wagtaildocs/chooser/chooser.js', {
            'documents': documents,
            'uploadform': uploadform,
            'searchform': searchform,
            'collections': collections,
            'is_searching': False,
        })
def document_chosen(request, document_id):
    """Close the chooser modal, handing the selected document back to the page."""
    chosen = get_object_or_404(get_document_model(), id=document_id)
    context = {'document_json': get_document_json(chosen)}
    return render_modal_workflow(
        request, None, 'wagtaildocs/chooser/document_chosen.js', context
    )
@permission_checker.require('add')
def chooser_upload(request):
    """Handle the upload tab of the document chooser modal.

    A GET renders an empty upload form. A valid POST saves the document,
    reindexes it, and closes the modal with the new document selected;
    an invalid POST falls through and re-renders the chooser with errors.
    """
    Document = get_document_model()
    DocumentForm = get_document_form(Document)

    if request.method != 'POST':
        form = DocumentForm()
    else:
        new_document = Document(uploaded_by_user=request.user)
        form = DocumentForm(request.POST, request.FILES, instance=new_document)

        if form.is_valid():
            form.save()

            # Reindex the document to make sure all tags are indexed
            for backend in get_search_backends():
                backend.add(new_document)

            return render_modal_workflow(
                request, None, 'wagtaildocs/chooser/document_chosen.js',
                {'document_json': get_document_json(new_document)}
            )

    documents = Document.objects.order_by('title')
    return render_modal_workflow(
        request, 'wagtaildocs/chooser/chooser.html', 'wagtaildocs/chooser/chooser.js',
        {'documents': documents, 'uploadform': form}
    )
| bsd-3-clause |
simontakite/sysadmin | pythonscripts/programmingpython/Internet/Sockets/fork-server-signal.py | 2 | 2126 | """
Same as fork-server.py, but use the Python signal module to avoid keeping
child zombie processes after they terminate, instead of an explicit reaper
loop before each new connection; SIG_IGN means ignore, and may not work with
SIG_CHLD child exit signal on all platforms; see Linux documentation for more
about the restartability of a socket.accept call interrupted with a signal;
"""
import os, time, sys, signal                    # fixed: 'signal' was imported twice
from socket import *                            # get socket constructor and constants

myHost = ''                                     # server machine, '' means local host
myPort = 50007                                  # listen on a non-reserved port number

sockobj = socket(AF_INET, SOCK_STREAM)          # make a TCP socket object
sockobj.bind((myHost, myPort))                  # bind it to server port number
sockobj.listen(5)                               # up to 5 pending connects

signal.signal(signal.SIGCHLD, signal.SIG_IGN)   # avoid child zombie processes
def now():
    """Return the current wall-clock time on the server as a readable string."""
    return time.ctime(time.time())
def handleClient(connection):      # child process replies, exits
    # Runs in the forked child: echo each request back with a timestamp, then
    # terminate the child with os._exit so no parent-side cleanup runs.
    time.sleep(5)                  # simulate a blocking activity
    while True:                    # read, write a client socket
        data = connection.recv(1024)
        if not data: break         # empty recv => client closed its end
        # NOTE(review): data is bytes, so '%s' renders it as b'...' under
        # Python 3 -- presumably intentional for this demo; confirm if reused.
        reply = 'Echo=>%s at %s' % (data, now())
        connection.send(reply.encode())
    connection.close()
    os._exit(0)
def dispatcher():                                # listen until process killed
    # Accept loop: fork one child per client; the SIGCHLD/SIG_IGN handler set
    # at module top reaps children, so no explicit waitpid loop is needed.
    # NOTE(review): per the module docstring, accept() interrupted by a signal
    # may not be restartable on all platforms -- confirm before relying on it.
    while True:                                  # wait for next connection,
        connection, address = sockobj.accept()   # pass to process for service
        print('Server connected by', address, end=' ')
        print('at', now())
        childPid = os.fork()                     # copy this process
        if childPid == 0:                        # if in child process: handle
            handleClient(connection)             # else: go accept next connect

dispatcher()
| gpl-2.0 |
abtink/openthread | tests/scripts/thread-cert/Cert_6_1_05_REEDAttachConnectivity.py | 4 | 5999 | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
from pktverify.consts import MLE_CHILD_ID_REQUEST, MLE_PARENT_REQUEST, MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MLE_FRAME_COUNTER_TLV, TIMEOUT_TLV, ADDRESS_REGISTRATION_TLV, TLV_REQUEST_TLV, LINK_LOCAL_ALL_ROUTERS_MULTICAST_ADDRESS
from pktverify.packet_verifier import PacketVerifier
LEADER = 1
ROUTER1 = 2
REED1 = 3
REED2 = 4
ED = 5
class Cert_6_1_5_REEDAttachConnectivity(thread_cert.TestCase):
    """Thread cert 6.1.5: an end device attaching via REEDs picks the one
    with better connectivity (REED_1; REED_2's link to ED is set to -85 dBm).
    router_upgrade_threshold=0 keeps REED_1/REED_2 as REEDs initially."""

    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'panid': 0xface,
            'allowlist': [ROUTER1, REED1, REED2]
        },
        ROUTER1: {
            'name': 'ROUTER_1',
            'mode': 'rdn',
            'panid': 0xface,
            'router_selection_jitter': 1,
            'allowlist': [LEADER, REED2]
        },
        REED1: {
            'name': 'REED_1',
            'mode': 'rdn',
            'panid': 0xface,
            'router_upgrade_threshold': 0,
            'allowlist': [LEADER, ROUTER1, ED]
        },
        REED2: {
            'name': 'REED_2',
            'mode': 'rdn',
            'panid': 0xface,
            'router_upgrade_threshold': 0,
            # ED hears REED_2 only at -85 dBm, making REED_1 the better parent.
            'allowlist': [LEADER, (ED, -85)]
        },
        ED: {
            'name': 'ED',
            'is_mtd': True,
            'mode': 'rn',
            'panid': 0xface,
            'allowlist': [REED1, REED2]
        },
    }

    def test(self):
        # Bring the network up in order: leader, router, both REEDs, then ED.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        self.nodes[REED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED1].get_state(), 'child')

        self.nodes[REED2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED2].get_state(), 'child')

        self.nodes[ED].start()
        self.simulator.go(10)
        self.assertEqual(self.nodes[ED].get_state(), 'child')
        # Attaching the ED should promote its chosen parent (REED_1) to router.
        self.assertEqual(self.nodes[REED1].get_state(), 'router')

        self.collect_ipaddrs()
        addrs = self.nodes[ED].get_addrs()
        for addr in addrs:
            self.assertTrue(self.nodes[REED1].ping(addr))

    def verify(self, pv):
        # Packet-capture verification; must_next() calls are order-sensitive.
        pkts = pv.pkts
        pv.summary.show()

        REED_1 = pv.vars['REED_1']
        ED = pv.vars['ED']
        _reed1_pkts = pkts.filter_wpan_src64(REED_1)
        _ed_pkts = pkts.filter_wpan_src64(ED)

        # Step 2: The DUT MUST send a MLE Parent Request to the
        # All-Routers multicast address
        _ed_pkts.filter_mle_cmd(MLE_PARENT_REQUEST).filter_ipv6_dst(
            LINK_LOCAL_ALL_ROUTERS_MULTICAST_ADDRESS).must_next().must_verify(
                lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1)

        # Step 3: REED_1 and REED_2 No response to Parent Request
        # Step 4: DUT Send MLE Parent Request with Scan Mask set to Routers AND REEDs
        _ed_pkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
            lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(
                p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1 and p.mle.tlv.scan_mask.e == 1)

        # Step 5: The DUT MUST send a MLE Child ID Request
        _ed_pkts.filter_wpan_dst64(REED_1).filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(
            lambda p: {
                RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MLE_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV,
                ADDRESS_REGISTRATION_TLV, TLV_REQUEST_TLV
            } <= set(p.mle.tlv.type))

        # Step 8: The DUT MUST respond with ICMPv6 Echo Reply
        ed_mleid = pv.vars['ED_MLEID']
        reed1_mleid = pv.vars['REED_1_MLEID']
        _pkt = _reed1_pkts.filter(
            lambda p: p.ipv6.src == reed1_mleid and p.ipv6.dst == ed_mleid).filter_ping_request().must_next()
        _ed_pkts.filter(lambda p: p.ipv6.src == ed_mleid and p.ipv6.dst == reed1_mleid).filter_ping_reply(
            identifier=_pkt.icmpv6.echo.identifier).must_next()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
b0ri5/nishe-googlecode | scons/scons-local-1.3.0/SCons/Tool/aixlink.py | 5 | 2571 | """SCons.Tool.aixlink
Tool-specific initialization for the IBM Visual Age linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixlink.py 4720 2010/03/24 03:14:11 jars"
import os
import os.path
import SCons.Util
import aixcc
import link
cplusplus = __import__('c++', globals(), locals(), [])
def smart_linkflags(source, target, env, for_signature):
    """SCons flag generator: add -qtempinc for C++ links when a build dir is set.

    for_signature is part of the SCons generator signature and is unused here.
    """
    if not cplusplus.iscplusplus(source):
        return ''
    tempinc_root = env.subst('$BUILDDIR', target=target, source=source)
    if not tempinc_root:
        return ''
    return '-qtempinc=' + os.path.join(tempinc_root, 'tempinc')
def generate(env):
    """
    Add Builders and construction variables for Visual Age linker to
    an Environment.
    """
    # Pull in the generic link tool first; the assignments below must come
    # after it so they override its defaults.
    link.generate(env)

    env['SMARTLINKFLAGS'] = smart_linkflags
    # LINKFLAGS expands $SMARTLINKFLAGS lazily, so the generator above runs
    # per-target at build time.
    env['LINKFLAGS'] = SCons.Util.CLVar('$SMARTLINKFLAGS')
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -qmkshrobj -qsuppress=1501-218')
    env['SHLIBSUFFIX'] = '.a'
def exists(env):
    """Return the full path to the xlc compiler if it is installed, else None."""
    toolpath, compiler, _shcc, _version = aixcc.get_xlc(env)
    if not (toolpath and compiler):
        return None
    candidate = os.path.join(toolpath, compiler)
    return candidate if os.path.exists(candidate) else None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-3.0 |
davidam/python-examples | sparql/wikidata-humans.py | 1 | 1480 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
from SPARQLWrapper import SPARQLWrapper, JSON
# Public Wikidata SPARQL endpoint.
endpoint_url = "https://query.wikidata.org/sparql"

# Sample query: the first 10 items that are instances of (wdt:P31)
# human (wd:Q5), with labels resolved by the wikibase label service.
query = """#added before 2016-10
#Humans
SELECT ?item ?itemLabel WHERE {
?item wdt:P31 wd:Q5.
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
limit 10"""
def get_results(endpoint_url, query):
    """Run *query* against the SPARQL *endpoint_url* and return the parsed JSON."""
    client = SPARQLWrapper(endpoint_url)
    client.setQuery(query)
    client.setReturnFormat(JSON)
    response = client.query()
    return response.convert()
# Execute the query once and print each result binding: a dict mapping
# SPARQL variable names ('item', 'itemLabel') to value descriptors.
results = get_results(endpoint_url, query)

for result in results["results"]["bindings"]:
    print(result)
| gpl-3.0 |
Harmon758/discord.py | discord/raw_models.py | 2 | 8605 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__all__ = (
'RawMessageDeleteEvent',
'RawBulkMessageDeleteEvent',
'RawMessageUpdateEvent',
'RawReactionActionEvent',
'RawReactionClearEvent',
'RawReactionClearEmojiEvent',
'RawIntegrationDeleteEvent',
)
class _RawReprMixin:
def __repr__(self):
value = ' '.join(f'{attr}={getattr(self, attr)!r}' for attr in self.__slots__)
return f'<{self.__class__.__name__} {value}>'
class RawMessageDeleteEvent(_RawReprMixin):
    """Represents the event payload for a :func:`on_raw_message_delete` event.

    Attributes
    ------------
    channel_id: :class:`int`
        The channel ID where the deletion took place.
    guild_id: Optional[:class:`int`]
        The guild ID where the deletion took place, if applicable.
    message_id: :class:`int`
        The message ID that got deleted.
    cached_message: Optional[:class:`Message`]
        The cached message, if found in the internal message cache.
    """

    __slots__ = ('message_id', 'channel_id', 'guild_id', 'cached_message')

    def __init__(self, data):
        self.cached_message = None
        self.message_id = int(data['id'])
        self.channel_id = int(data['channel_id'])
        # 'guild_id' is absent from the payload for DM channels.
        if 'guild_id' in data:
            self.guild_id = int(data['guild_id'])
        else:
            self.guild_id = None
class RawBulkMessageDeleteEvent(_RawReprMixin):
    """Represents the event payload for a :func:`on_raw_bulk_message_delete` event.

    Attributes
    -----------
    message_ids: Set[:class:`int`]
        A :class:`set` of the message IDs that were deleted.
    channel_id: :class:`int`
        The channel ID where the message got deleted.
    guild_id: Optional[:class:`int`]
        The guild ID where the message got deleted, if applicable.
    cached_messages: List[:class:`Message`]
        The cached messages, if found in the internal message cache.
    """

    __slots__ = ('message_ids', 'channel_id', 'guild_id', 'cached_messages')

    def __init__(self, data):
        self.cached_messages = []
        self.message_ids = set(map(int, data.get('ids', [])))
        self.channel_id = int(data['channel_id'])
        # 'guild_id' is absent from the payload for DM channels.
        if 'guild_id' in data:
            self.guild_id = int(data['guild_id'])
        else:
            self.guild_id = None
class RawMessageUpdateEvent(_RawReprMixin):
    """Represents the payload for a :func:`on_raw_message_edit` event.

    Attributes
    -----------
    message_id: :class:`int`
        The message ID that got updated.
    channel_id: :class:`int`
        The channel ID where the update took place.

        .. versionadded:: 1.3
    guild_id: Optional[:class:`int`]
        The guild ID where the message got updated, if applicable.

        .. versionadded:: 1.7
    data: :class:`dict`
        The raw data given by the `gateway <https://discord.com/developers/docs/topics/gateway#message-update>`_
    cached_message: Optional[:class:`Message`]
        The cached message, if found in the internal message cache. Represents the message before
        it is modified by the data in :attr:`RawMessageUpdateEvent.data`.
    """

    __slots__ = ('message_id', 'channel_id', 'guild_id', 'data', 'cached_message')

    def __init__(self, data):
        self.data = data
        self.cached_message = None
        self.message_id = int(data['id'])
        self.channel_id = int(data['channel_id'])
        self.guild_id = int(data['guild_id']) if 'guild_id' in data else None
class RawReactionActionEvent(_RawReprMixin):
    """Represents the payload for a :func:`on_raw_reaction_add` or
    :func:`on_raw_reaction_remove` event.

    Attributes
    -----------
    message_id: :class:`int`
        The message ID that got or lost a reaction.
    user_id: :class:`int`
        The user ID who added the reaction or whose reaction was removed.
    channel_id: :class:`int`
        The channel ID where the reaction got added or removed.
    guild_id: Optional[:class:`int`]
        The guild ID where the reaction got added or removed, if applicable.
    emoji: :class:`PartialEmoji`
        The custom or unicode emoji being used.
    member: Optional[:class:`Member`]
        The member who added the reaction. Only available if `event_type` is `REACTION_ADD` and the reaction is inside a guild.

        .. versionadded:: 1.3
    event_type: :class:`str`
        The event type that triggered this action. Can be
        ``REACTION_ADD`` for reaction addition or
        ``REACTION_REMOVE`` for reaction removal.

        .. versionadded:: 1.3
    """

    __slots__ = ('message_id', 'user_id', 'channel_id', 'guild_id', 'emoji', 'event_type', 'member')

    def __init__(self, data, emoji, event_type):
        self.emoji = emoji
        self.event_type = event_type
        self.member = None
        self.message_id = int(data['message_id'])
        self.channel_id = int(data['channel_id'])
        self.user_id = int(data['user_id'])
        self.guild_id = int(data['guild_id']) if 'guild_id' in data else None
class RawReactionClearEvent(_RawReprMixin):
    """Represents the payload for a :func:`on_raw_reaction_clear` event.

    Attributes
    -----------
    message_id: :class:`int`
        The message ID that got its reactions cleared.
    channel_id: :class:`int`
        The channel ID where the reactions got cleared.
    guild_id: Optional[:class:`int`]
        The guild ID where the reactions got cleared.
    """

    __slots__ = ('message_id', 'channel_id', 'guild_id')

    def __init__(self, data):
        self.message_id = int(data['message_id'])
        self.channel_id = int(data['channel_id'])
        self.guild_id = int(data['guild_id']) if 'guild_id' in data else None
class RawReactionClearEmojiEvent(_RawReprMixin):
    """Represents the payload for a :func:`on_raw_reaction_clear_emoji` event.

    .. versionadded:: 1.3

    Attributes
    -----------
    message_id: :class:`int`
        The message ID that got its reactions cleared.
    channel_id: :class:`int`
        The channel ID where the reactions got cleared.
    guild_id: Optional[:class:`int`]
        The guild ID where the reactions got cleared.
    emoji: :class:`PartialEmoji`
        The custom or unicode emoji being removed.
    """

    __slots__ = ('message_id', 'channel_id', 'guild_id', 'emoji')

    def __init__(self, data, emoji):
        self.emoji = emoji
        self.message_id = int(data['message_id'])
        self.channel_id = int(data['channel_id'])
        self.guild_id = int(data['guild_id']) if 'guild_id' in data else None
class RawIntegrationDeleteEvent(_RawReprMixin):
    """Represents the payload for a :func:`on_raw_integration_delete` event.

    .. versionadded:: 2.0

    Attributes
    -----------
    integration_id: :class:`int`
        The ID of the integration that got deleted.
    application_id: Optional[:class:`int`]
        The ID of the bot/OAuth2 application for this deleted integration.
    guild_id: :class:`int`
        The guild ID where the integration got deleted.
    """

    __slots__ = ('integration_id', 'application_id', 'guild_id')

    def __init__(self, data):
        self.integration_id = int(data['id'])
        self.guild_id = int(data['guild_id'])
        # Not every integration is backed by an application.
        if 'application_id' in data:
            self.application_id = int(data['application_id'])
        else:
            self.application_id = None
| mit |
usersource/anno | anno_gec_server/api/account_api.py | 1 | 7816 | import endpoints
from protorpc import message_types
from protorpc import remote
import json
from helper.settings import anno_js_client_id
from helper.utils import validate_email
from helper.utils import validate_password
from helper.utils import validate_team_secret
from helper.utils import md5
from helper.utils import get_endpoints_current_user
from helper.utils import reset_password
from helper.utils import get_user_team_token
from helper.utils_enum import AuthSourceType
from model.user import User
from model.community import Community
from model.userrole import UserRole
from model.anno import Anno
from message.account_message import AccountMessage
from message.account_message import AccountAuthenticateMessage
from message.account_message import AccountAuthenticateListMessage
from message.user_message import UserMessage
from message.anno_api_messages import AnnoListMessage
@endpoints.api(name='account', version='1.0', description='Account API',
allowed_client_ids=[endpoints.API_EXPLORER_CLIENT_ID, anno_js_client_id])
class AccountApi(remote.Service):
@endpoints.method(AccountMessage, UserMessage, path='account/register', http_method='POST',
name='account.register')
def register(self, request):
email = request.user_email
validate_email(email)
password = request.password
validate_password(password)
user = User.find_user_by_email(email)
if user is not None:
raise endpoints.BadRequestException("Email(" + email + ") already exists.")
display_name = request.display_name
if display_name is None or display_name == '':
raise endpoints.BadRequestException("Registration failed. Display name is missing.")
user = User.find_user_by_display_name(display_name)
if user is not None:
raise endpoints.BadRequestException("Display name(" + display_name + ") already exists.")
user = User.insert_user(email=email, username=display_name, password=md5(password))
return UserMessage(id=user.key.id())
@endpoints.method(AccountMessage, UserMessage, path='account/authenticate',
http_method='POST', name='account.authenticate')
def authenticate(self, request):
email = request.user_email
validate_email(email)
team_key = request.team_key
user = User.find_user_by_email(email, team_key)
if team_key:
team_secret = request.team_secret
validate_team_secret(team_secret)
display_name = request.display_name
image_url = request.user_image_url
if not user:
user = User.insert_user(email=email, username=display_name, account_type=team_key, image_url=image_url)
community = Community.getCommunityFromTeamKey(team_key)
UserRole.insert(user, community)
elif (display_name != user.display_name) or (image_url != user.image_url):
User.update_user(user=user, email=email, username=display_name, account_type=team_key, image_url=image_url)
if not Community.authenticate(team_key, md5(team_secret)):
raise endpoints.UnauthorizedException("Authentication failed. Team key and secret are not matched.")
elif user.auth_source == AuthSourceType.ANNO:
password = request.password
validate_password(password)
if not user:
raise endpoints.NotFoundException("Authentication failed. User account " + email + " doesn't exist.")
if not User.authenticate(email, md5(password)):
raise endpoints.UnauthorizedException("Authentication failed. Email and password are not matched.")
else:
raise endpoints.ForbiddenException("Account for '%s' is Google or Facebook OAuth account." % email)
return UserMessage(id=user.key.id(), display_name=user.display_name)
@endpoints.method(AccountMessage, AccountAuthenticateListMessage, path="account/dashboard/authenticate",
http_method="POST", name="account.dashboard.authenticate")
def dashboard_authenticate(self, request):
email = request.user_email
password = request.password
team_key = request.team_key
accounts = []
if email and password:
users = User.get_all_user_by_email(email, md5(password), team_key=team_key)
for user in users:
if user:
team_key = user.account_type
team = Community.getCommunityFromTeamKey(team_key)
if team:
userTeamToken = get_user_team_token(email, password, team_key,
team.team_secret, user.display_name,
user.image_url)
account = AccountAuthenticateMessage(display_name=user.display_name,
image_url=user.image_url,
team_name=team.name,
team_key=team_key,
team_hash=team.team_hash,
user_team_token=json.dumps(userTeamToken),
role=UserRole.getRole(user, team))
accounts.append(account)
return AccountAuthenticateListMessage(authenticated=True if len(accounts) else False, account_info=accounts)
@endpoints.method(AccountMessage, AccountAuthenticateListMessage, path="account/dashboard/teams",
http_method="GET", name="account.dashboard.teams")
def get_dashboard_teams(self, request):
accounts = User.get_all_teams_by_email(request.user_email)
return AccountAuthenticateListMessage(account_info=accounts)
@endpoints.method(AccountMessage, message_types.VoidMessage, path='account/forgot_detail',
                  http_method='POST', name='account.forgot_detail')
def forgot_details(self, request):
    """Trigger a password reset for a native (ANNO) account.

    Raises NotFoundException for unknown emails and ForbiddenException
    for OAuth-backed (Google/Facebook) accounts.
    """
    email = request.user_email
    user = User.find_user_by_email(email)
    if not user:
        raise endpoints.NotFoundException("Email address is not found. Please enter correct email address.")
    if user.auth_source != AuthSourceType.ANNO:
        raise endpoints.ForbiddenException("Account for '%s' is Google or Facebook OAuth account." % email)
    validate_email(email)
    reset_password(user, email)
    return message_types.VoidMessage()
@endpoints.method(AccountMessage, message_types.VoidMessage, path='account/bind_account', http_method='POST',
                  name='account.bind_account')
def bind_account(self, request):
    """Attach an OAuth auth source to an existing user, creating the
    user record first when it does not exist yet."""
    email = request.user_email
    if email is None:
        raise endpoints.UnauthorizedException("Oops, something went wrong. Please try later.")
    # Default to Google when the caller does not specify an auth source.
    auth_source = request.auth_source if request.auth_source is not None else AuthSourceType.GOOGLE
    user = User.find_user_by_email(email)
    if user is None:
        User.insert_user(email=email, username=request.display_name)
    else:
        user.auth_source = auth_source
        user.display_name = request.display_name
        user.put()
    return message_types.VoidMessage()
| mpl-2.0 |
CapOM/ChromiumGStreamerBackend | build/android/pylib/results/flakiness_dashboard/results_uploader.py | 7 | 6766 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uploads the results to the flakiness dashboard server."""
# pylint: disable=E1002,R0201
import logging
import os
import shutil
import tempfile
import xml
# ``import xml`` alone does not load these submodules; they are used by
# JSONResultsGenerator._GetSVNRevision and must be imported explicitly.
import xml.dom.minidom
import xml.parsers.expat

from devil.utils import cmd_helper
from pylib import constants
from pylib.results.flakiness_dashboard import json_results_generator
from pylib.utils import repo_utils
class JSONResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
  """Writes test results to a JSON file and handles uploading that file to
  the test results server.
  """

  def __init__(self, builder_name, build_name, build_number, tmp_folder,
               test_results_map, test_results_server, test_type, master_name):
    """Configures the base generator with Android-specific defaults.

    Args:
      builder_name: buildbot builder name.
      build_name: build name reported to the dashboard.
      build_number: buildbot build number.
      tmp_folder: scratch directory the JSON files are written to.
      test_results_map: dict mapping test name -> TestResult.
      test_results_server: dashboard host to upload to.
      test_type: test type label shown on the dashboard.
      master_name: buildbot master name.
    """
    super(JSONResultsGenerator, self).__init__(
        builder_name=builder_name,
        build_name=build_name,
        build_number=build_number,
        results_file_base_path=tmp_folder,
        builder_base_url=None,
        test_results_map=test_results_map,
        svn_repositories=(('webkit', 'third_party/WebKit'),
                          ('chrome', '.')),
        test_results_server=test_results_server,
        test_type=test_type,
        master_name=master_name)

  #override
  def _GetModifierChar(self, test_name):
    """Returns the modifier recorded for |test_name|, or the NO_DATA
    marker when the test is unknown."""
    if test_name not in self._test_results_map:
      return self.__class__.NO_DATA_RESULT
    return self._test_results_map[test_name].modifier

  #override
  def _GetSVNRevision(self, in_directory):
    """Returns the git/svn revision for the given directory.

    Args:
      in_directory: The directory relative to src.
    """
    def _is_git_directory(in_directory):
      """Returns true if the given directory is in a git repository.

      Args:
        in_directory: The directory path to be tested.
      """
      if os.path.exists(os.path.join(in_directory, '.git')):
        return True
      parent = os.path.dirname(in_directory)
      if parent == constants.DIR_SOURCE_ROOT or parent == in_directory:
        return False
      return _is_git_directory(parent)

    in_directory = os.path.join(constants.DIR_SOURCE_ROOT, in_directory)
    if not os.path.exists(os.path.join(in_directory, '.svn')):
      if _is_git_directory(in_directory):
        return repo_utils.GetGitHeadSHA1(in_directory)
      else:
        return ''

    output = cmd_helper.GetCmdOutput(['svn', 'info', '--xml'], cwd=in_directory)
    try:
      dom = xml.dom.minidom.parseString(output)
      return dom.getElementsByTagName('entry')[0].getAttribute('revision')
    except xml.parsers.expat.ExpatError:
      # Malformed/empty svn output: treat as unknown revision.
      return ''
    # NOTE: the original trailing ``return ''`` here was unreachable
    # (both the try and except branches return) and has been removed.
class ResultsUploader(object):
  """Handles uploading buildbot tests results to the flakiness dashboard."""

  def __init__(self, tests_type):
    """Reads buildbot identity from the environment.

    Args:
      tests_type: test type label, e.g. 'Chromium_Android_Instrumentation'.

    Raises:
      Exception: if BUILDBOT_BUILDNUMBER / BUILDBOT_BUILDERNAME are unset
        (i.e. when run outside a buildbot slave).
    """
    self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
    self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
    self._tests_type = tests_type

    if not self._build_number or not self._builder_name:
      # Fixed: the two literals used to concatenate without a space
      # ("...serverfrom your local machine.").
      raise Exception('You should not be uploading tests results to the '
                      'server from your local machine.')

    upstream = (tests_type != 'Chromium_Android_Instrumentation')
    if upstream:
      # TODO(frankf): Use factory properties (see buildbot/bb_device_steps.py)
      # This requires passing the actual master name (e.g. 'ChromiumFYI' not
      # 'chromium.fyi').
      from slave import slave_utils  # pylint: disable=F0401
      self._build_name = slave_utils.SlaveBuildName(constants.DIR_SOURCE_ROOT)
      self._master_name = slave_utils.GetActiveMaster()
    else:
      self._build_name = 'chromium-android'
      buildbot_branch = os.environ.get('BUILDBOT_BRANCH')
      if not buildbot_branch:
        buildbot_branch = 'master'
      else:
        # Ensure there's no leading "origin/"
        buildbot_branch = buildbot_branch[buildbot_branch.find('/') + 1:]
      self._master_name = '%s-%s' % (self._build_name, buildbot_branch)

    # Maps test name -> json_results_generator.TestResult.
    self._test_results_map = {}

  def AddResults(self, test_results):
    """Converts a TestRunResults object into dashboard TestResult entries."""
    # TODO(frankf): Differentiate between fail/crash/timeouts.
    conversion_map = [
        (test_results.GetPass(), False,
         json_results_generator.JSONResultsGeneratorBase.PASS_RESULT),
        (test_results.GetFail(), True,
         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.GetCrash(), True,
         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.GetTimeout(), True,
         json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
        (test_results.GetUnknown(), True,
         json_results_generator.JSONResultsGeneratorBase.NO_DATA_RESULT),
        ]

    for results_list, failed, modifier in conversion_map:
      for single_test_result in results_list:
        test_result = json_results_generator.TestResult(
            test=single_test_result.GetName(),
            failed=failed,
            # Durations are reported in ms; the dashboard expects seconds.
            elapsed_time=single_test_result.GetDuration() / 1000)
        # The WebKit TestResult object sets the modifier it based on test name.
        # Since we don't use the same test naming convention as WebKit the
        # modifier will be wrong, so we need to overwrite it.
        test_result.modifier = modifier
        self._test_results_map[single_test_result.GetName()] = test_result

  def Upload(self, test_results_server):
    """Generates the JSON files and uploads them to |test_results_server|.

    Upload failures are logged, never raised; the scratch directory is
    always cleaned up.
    """
    if not self._test_results_map:
      return

    tmp_folder = tempfile.mkdtemp()
    try:
      results_generator = JSONResultsGenerator(
          builder_name=self._builder_name,
          build_name=self._build_name,
          build_number=self._build_number,
          tmp_folder=tmp_folder,
          test_results_map=self._test_results_map,
          test_results_server=test_results_server,
          test_type=self._tests_type,
          master_name=self._master_name)

      json_files = ["incremental_results.json", "times_ms.json"]
      results_generator.GenerateJSONOutput()
      results_generator.GenerateTimesMSFile()
      results_generator.UploadJSONFiles(json_files)
    except Exception as e:  # pylint: disable=broad-except
      logging.error("Uploading results to test server failed: %s.", e)
    finally:
      shutil.rmtree(tmp_folder)
def Upload(results, flakiness_dashboard_server, test_type):
  """Sends a batch of Chrome-for-Android test results to the flakiness
  dashboard.

  Args:
    results: collected test results to report.
    flakiness_dashboard_server: dashboard host that receives the upload.
    test_type: label under which the dashboard displays these tests.
  """
  results_uploader = ResultsUploader(test_type)
  results_uploader.AddResults(results)
  results_uploader.Upload(flakiness_dashboard_server)
| bsd-3-clause |
LaiTash/OEUO-python | profiles/default/scripts/value_calc.py | 1 | 6134 | from uo.serpent.script import ScriptBase
from uo.serpent.props import *
from uo.tools.items import Item, get_by_id
from uo.tools.extensions import request_target
class ValueCalculatorScript(ScriptBase):
    """Scores an in-game item: every item property whose name matches one
    of the 'f'-prefixed settings below contributes
    ``base_value + property_value * factor`` to the total (see
    calculate_value). Settings are grouped for the configuration UI.
    """
    script_name = 'Value calculator'

    base_value = IntSetting('Base Value', default=1000)
    area_damage_factor = FloatSetting('fArea Damage', default=2, group='Damage')
    balanced_factor = FloatSetting('fBalanced', default=2, group='Properties')
    best_wpn_skill_factor = FloatSetting('fUse Best Weapon Skill', default=2, group='Properties')
    cold_resist_factor = FloatSetting('fCold Resist', default=5, group='Resistances')
    damage_increase_factor = FloatSetting('fDamage Increase', default=100, group='Damage')
    defence_chance_increase_factor = FloatSetting('fDefence Chance Increase', default=100, group='Defense')
    dex_bonus_factor = FloatSetting('fDextierity Bonus', default=10, group='Stats')
    durability_factor = FloatSetting('fDurability', default=1.2, group='Properties')
    energy_resist_factor = FloatSetting('fEnergy Resist', default=5, group='Resistances')
    enhance_potions_factor = FloatSetting('fEnhance Potions', default=100, group='Effects')
    faster_cast_recovery_factor = FloatSetting('fFaster Cast Recovery', default=1000, group='Effects')
    faster_casting_factor = FloatSetting('fFaster Casting', default=1000, group='Effects')
    fire_resist_factor = FloatSetting('fFire Resist', default=5, group='Resistances')
    hit_chance_increase_factor = FloatSetting('fHit Chance Increase', default=3, group='Damage')
    hit_dispel_factor = FloatSetting('fHit Dispel', default=1.2, group='Damage')
    hit_maroow = FloatSetting('fHit Magic Arrow', default=5, group='Damage')
    hit_fireball = FloatSetting('fHit Fireball', default=10, group='Damage')
    hit_lightning = FloatSetting('fHit Lightning', default=15, group='Damage')
    hit_life_leech = FloatSetting('fHit Life Leech', default=4, group='Damage')
    hit_lower_attack = FloatSetting('fHit Lower Attack', default=4, group='Damage')
    hit_lower_defence = FloatSetting('fHit Lower Defence', default=4, group='Damage')
    hit_mana_leech = FloatSetting('fHit Mana Leech', default=4, group='Damage')
    intelligence_bonus = FloatSetting('fIntelligence Bonus', default=10, group='Stats')
    lower_stamina_cost = FloatSetting('fLower Stamina Cost', default=5, group='Effects')
    lower_mana_cost = FloatSetting('fLower Mana Cost', default=20, group='Effects')
    lower_reagent_cost = FloatSetting('fLower Reagent Cost', default=20, group='Effects')
    lower_requirements = FloatSetting('fLower Requirements', default=1.2, group='Properties')
    luck_factor = FloatSetting('fLuck', default=1000, group='Effects')
    mage_armor = FloatSetting('fMage Armor', default=100, group='Properties')
    mana_regen = FloatSetting('fManaRegeneration', default=100, group='Stats')
    phys_resist = FloatSetting('fPhysical Resist', default=8, group='Resistances')
    pois_resist = FloatSetting('fPoison Resist', default=5, group='Resistances')
    water_resist = FloatSetting('fWater Resist', default=5, group='Resistances')
    # NOTE(review): 'fCold Resist' is also declared above as
    # cold_resist_factor — duplicate setting label; confirm which one the
    # settings framework actually uses.
    cold_resist = FloatSetting('fCold Resist', default=5, group='Resistances')
    reflect_phys = FloatSetting('fReflect Physical Damage', default=2, group='Resistances')
    self_repair = FloatSetting('fSelf Repair', default=30, group='Properties')
    spell_channel = FloatSetting('fSpell Channeling', default=100, group='Properties')
    spell_dam_inc = FloatSetting('fSpell Damage Increase', default=20, group='Effects')
    stamina_increase = FloatSetting('fStamina Increase', default=5, group='Stats')
    stamina_regen = FloatSetting('fStamina Regeneration', default=50, group='Stats')
    strenght_bonus = FloatSetting('fStrenght Bonus', default=10, group='Stats')
    swing_speed_inc = FloatSetting('fSwing Speed Increase', default=50, group='Damage')
    velocity_factor = FloatSetting('fVelocity', default=200, group='Properties')
    poison_damage = FloatSetting('fPoison Damage', default=20, group='Damage Type')
    energy_damage = FloatSetting('fEnergy Damage', default=20, group='Damage Type')
    water_damage = FloatSetting('fWater Damage', default=20, group='Damage Type')
    cold_damage = FloatSetting('fCold Damage', default=20, group='Damage Type')
    fire_damage = FloatSetting('fFire Damage', default=20, group='Damage Type')
    phys_damage = FloatSetting('fPhysical Damage', default=10, group='Damage Type')
    #TODO: slayers
    #TODO: skill bonuses
    bushido = FloatSetting('fBushido', default=50, group='Skills')
    tactics = FloatSetting('fTactics', default=30, group='Skills')
    swordsmanship = FloatSetting('fSwordsmanship', default=40, group='Skills')
    archery = FloatSetting('fArchery', default=40, group='Skills')
    magery = FloatSetting('fMagery', default=50, group='Skills')
    eval_int = FloatSetting('fEval Intelligence', default=30, group='Skills')
    anatomy = FloatSetting('fAnatomy', default=30, group='Skills')
    healing = FloatSetting('fHealing', default=50, group='Skills')
    chivalry = FloatSetting('fChivalry', default=50, group='Skills')
    ninjitsu = FloatSetting('fNinjitsu', default=50, group='Skills')
    stealth = FloatSetting('fStealth', default=50, group='Skills')
    hiding = FloatSetting('fHiding', default=50, group='Skills')

    @method_bind('Show item value')
    def show_value(self):
        """Prompt the player to target an item, then print its score."""
        target = request_target()
        if not target:
            # Targeting was cancelled; nothing to do.
            return
        item = get_by_id(target.id_)
        value = self.calculate_value(item)
        print value  # Python 2 print statement

    def calculate_value(self, item):
        """Return the weighted value score for *item*.

        :type item Item
        """
        result = 0
        props = item.properties
        settings = self.fetch_settings()
        print props.full_string()
        for setting_name, setting in settings:
            # Only 'f'-prefixed settings correspond to item properties;
            # stripping the prefix yields the property name to look up.
            if setting.name.find('f') != 0:
                continue
            rest = setting.name[1:]
            if rest in props:
                prop = props[rest]
                value = prop.value
                result += self.base_value + value * setting.value
        return result
| gpl-3.0 |
malin1993ml/h-store | third_party/cpp/protobuf/python/google/protobuf/internal/generator_test.py | 42 | 9038 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): Flesh this out considerably. We focused on reflection_test.py
# first, since it's testing the subtler code, and since it provides decent
# indirect testing of the protocol compiler output.
"""Unittest that directly tests the output of the pure-Python protocol
compiler. See //google/protobuf/reflection_test.py for a test which
further ensures that we can use Python protocol message objects as we expect.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_no_generic_services_pb2
MAX_EXTENSION = 536870912
class GeneratorTest(unittest.TestCase):
  """Checks descriptors, enums and default values emitted by the
  pure-Python protocol compiler for the unittest .proto files."""

  def testNestedMessageDescriptor(self):
    field_name = 'optional_nested_message'
    proto_type = unittest_pb2.TestAllTypes
    self.assertEqual(
        proto_type.NestedMessage.DESCRIPTOR,
        proto_type.DESCRIPTOR.fields_by_name[field_name].message_type)

  def testEnums(self):
    # We test only module-level enums here.
    # TODO(robinson): Examine descriptors directly to check
    # enum descriptor output.
    self.assertEqual(4, unittest_pb2.FOREIGN_FOO)
    self.assertEqual(5, unittest_pb2.FOREIGN_BAR)
    self.assertEqual(6, unittest_pb2.FOREIGN_BAZ)

    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(1, proto.FOO)
    self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
    self.assertEqual(2, proto.BAR)
    self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
    self.assertEqual(3, proto.BAZ)
    self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)

  def testExtremeDefaultValues(self):
    message = unittest_pb2.TestExtremeDefaultValues()

    # Python pre-2.6 does not have isinf() or isnan() functions, so we have
    # to provide our own.
    def isnan(val):
      # NaN is never equal to itself.
      return val != val
    def isinf(val):
      # Infinity times zero equals NaN.
      return not isnan(val) and isnan(val * 0)

    self.assertTrue(isinf(message.inf_double))
    self.assertTrue(message.inf_double > 0)
    self.assertTrue(isinf(message.neg_inf_double))
    self.assertTrue(message.neg_inf_double < 0)
    self.assertTrue(isnan(message.nan_double))
    self.assertTrue(isinf(message.inf_float))
    self.assertTrue(message.inf_float > 0)
    self.assertTrue(isinf(message.neg_inf_float))
    self.assertTrue(message.neg_inf_float < 0)
    self.assertTrue(isnan(message.nan_float))

  def testHasDefaultValues(self):
    desc = unittest_pb2.TestAllTypes.DESCRIPTOR

    expected_has_default_by_name = {
        'optional_int32': False,
        'repeated_int32': False,
        'optional_nested_message': False,
        'default_int32': True,
    }

    has_default_by_name = dict(
        [(f.name, f.has_default_value)
         for f in desc.fields
         if f.name in expected_has_default_by_name])
    self.assertEqual(expected_has_default_by_name, has_default_by_name)

  def testContainingTypeBehaviorForExtensions(self):
    self.assertEqual(unittest_pb2.optional_int32_extension.containing_type,
                     unittest_pb2.TestAllExtensions.DESCRIPTOR)
    self.assertEqual(unittest_pb2.TestRequired.single.containing_type,
                     unittest_pb2.TestAllExtensions.DESCRIPTOR)

  def testExtensionScope(self):
    self.assertEqual(unittest_pb2.optional_int32_extension.extension_scope,
                     None)
    self.assertEqual(unittest_pb2.TestRequired.single.extension_scope,
                     unittest_pb2.TestRequired.DESCRIPTOR)

  def testIsExtension(self):
    self.assertTrue(unittest_pb2.optional_int32_extension.is_extension)
    self.assertTrue(unittest_pb2.TestRequired.single.is_extension)

    message_descriptor = unittest_pb2.TestRequired.DESCRIPTOR
    non_extension_descriptor = message_descriptor.fields_by_name['a']
    # Was assertTrue(not ...); assertFalse gives clearer failure output.
    self.assertFalse(non_extension_descriptor.is_extension)

  def testOptions(self):
    proto = unittest_mset_pb2.TestMessageSet()
    self.assertTrue(proto.DESCRIPTOR.GetOptions().message_set_wire_format)

  def testNestedTypes(self):
    # Was the deprecated assertEquals alias; use assertEqual for
    # consistency with the rest of this file.
    self.assertEqual(
        set(unittest_pb2.TestAllTypes.DESCRIPTOR.nested_types),
        set([
            unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR,
            unittest_pb2.TestAllTypes.OptionalGroup.DESCRIPTOR,
            unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR,
        ]))
    self.assertEqual(unittest_pb2.TestEmptyMessage.DESCRIPTOR.nested_types, [])
    self.assertEqual(
        unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.nested_types, [])

  def testContainingType(self):
    self.assertTrue(
        unittest_pb2.TestEmptyMessage.DESCRIPTOR.containing_type is None)
    self.assertTrue(
        unittest_pb2.TestAllTypes.DESCRIPTOR.containing_type is None)
    self.assertEqual(
        unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
        unittest_pb2.TestAllTypes.DESCRIPTOR)
    # NOTE(review): duplicated assertion below (same as the one above);
    # it likely was meant to check OptionalGroup — confirm before changing.
    self.assertEqual(
        unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
        unittest_pb2.TestAllTypes.DESCRIPTOR)
    self.assertEqual(
        unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR.containing_type,
        unittest_pb2.TestAllTypes.DESCRIPTOR)

  def testContainingTypeInEnumDescriptor(self):
    self.assertTrue(unittest_pb2._FOREIGNENUM.containing_type is None)
    self.assertEqual(unittest_pb2._TESTALLTYPES_NESTEDENUM.containing_type,
                     unittest_pb2.TestAllTypes.DESCRIPTOR)

  def testPackage(self):
    self.assertEqual(
        unittest_pb2.TestAllTypes.DESCRIPTOR.file.package,
        'protobuf_unittest')
    desc = unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR
    self.assertEqual(desc.file.package, 'protobuf_unittest')
    self.assertEqual(
        unittest_import_pb2.ImportMessage.DESCRIPTOR.file.package,
        'protobuf_unittest_import')
    self.assertEqual(
        unittest_pb2._FOREIGNENUM.file.package, 'protobuf_unittest')
    self.assertEqual(
        unittest_pb2._TESTALLTYPES_NESTEDENUM.file.package,
        'protobuf_unittest')
    self.assertEqual(
        unittest_import_pb2._IMPORTENUM.file.package,
        'protobuf_unittest_import')

  def testExtensionRange(self):
    self.assertEqual(
        unittest_pb2.TestAllTypes.DESCRIPTOR.extension_ranges, [])
    self.assertEqual(
        unittest_pb2.TestAllExtensions.DESCRIPTOR.extension_ranges,
        [(1, MAX_EXTENSION)])
    self.assertEqual(
        unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR.extension_ranges,
        [(42, 43), (4143, 4244), (65536, MAX_EXTENSION)])

  def testFileDescriptor(self):
    self.assertEqual(unittest_pb2.DESCRIPTOR.name,
                     'google/protobuf/unittest.proto')
    self.assertEqual(unittest_pb2.DESCRIPTOR.package, 'protobuf_unittest')
    self.assertFalse(unittest_pb2.DESCRIPTOR.serialized_pb is None)

  def testNoGenericServices(self):
    # unittest_no_generic_services.proto should contain defs for everything
    # except services.
    self.assertTrue(hasattr(unittest_no_generic_services_pb2, "TestMessage"))
    self.assertTrue(hasattr(unittest_no_generic_services_pb2, "FOO"))
    self.assertTrue(hasattr(unittest_no_generic_services_pb2, "test_extension"))
    self.assertFalse(hasattr(unittest_no_generic_services_pb2, "TestService"))
if __name__ == '__main__':
  # Allow running this module directly as a standalone test script.
  unittest.main()
| gpl-3.0 |
centrologic/django-codenerix-products | codenerix_products/migrations/0001_initial.py | 1 | 61602 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-01-17 15:28
from __future__ import unicode_literals
import codenerix.fields
import codenerix.lib.helpers
import codenerix_products.models
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(blank=True, max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('type_value', models.CharField(choices=[('F', 'Sin validacion'), ('B', 'Boolean'), ('O', 'Lista de opciones')], default='F', max_length=2, verbose_name='Type value')),
('price', models.FloatField(default=0, verbose_name='Price')),
('type_price', models.CharField(choices=[('P', 'Porcentaje sobre el precio del producto'), ('I', 'Incremento sobre el precio del producto'), ('F', 'Precio final')], default='P', max_length=2, verbose_name='Type price')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='AttributeTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='AttributeTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(blank=True, max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('outstanding', models.BooleanField(default=True, verbose_name='Outstanding')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
('show_menu', models.BooleanField(default=True, verbose_name='Show menu')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='BrandTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=True, verbose_name='Public')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='BrandTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=True, verbose_name='Public')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('code', models.CharField(blank=True, max_length=250, null=True, unique=True, verbose_name='Code')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('show_menu', models.BooleanField(default=True, verbose_name='Show menu')),
('show_only_product_stock', models.BooleanField(default=True, verbose_name='Show only products in stock')),
('image', codenerix.fields.ImageAngularField(blank=True, help_text='Se aconseja un tamaño comprendido entre 1200px y 2000px', max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('icon', codenerix.fields.ImageAngularField(blank=True, help_text='Se aconseja que sea una imagen superior a 200px transparente y en formato png o svg', max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Icon')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='CategoryTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='CategoryTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(blank=True, max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('code', models.CharField(blank=True, max_length=250, null=True, unique=True, verbose_name='Code')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
('show_menu', models.BooleanField(default=True, verbose_name='Show menu')),
('icon', codenerix.fields.ImageAngularField(blank=True, help_text='Se aconseja que sea una imagen superior a 200px transparente y en formato png o svg', max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Icon')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FamilyTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FamilyTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='Feature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(blank=True, max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('type_value', models.CharField(choices=[('F', 'Sin validacion'), ('B', 'Boolean'), ('O', 'Lista de opciones')], default='F', max_length=2, verbose_name='Type value')),
('price', models.FloatField(default=0, verbose_name='Price')),
('type_price', models.CharField(choices=[('P', 'Porcentaje sobre el precio del producto'), ('I', 'Incremento sobre el precio del producto'), ('F', 'Precio final')], default='P', max_length=2, verbose_name='Type price')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FeatureSpecial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(blank=True, max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('type_value', models.CharField(choices=[('F', 'Sin validacion'), ('B', 'Boolean'), ('O', 'Lista de opciones')], default='F', max_length=2, verbose_name='Type value')),
('price', models.FloatField(default=0, verbose_name='Price')),
('type_price', models.CharField(choices=[('P', 'Porcentaje sobre el precio del producto'), ('I', 'Incremento sobre el precio del producto'), ('F', 'Precio final')], default='P', max_length=2, verbose_name='Type price')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
('unique', models.BooleanField(default=True, verbose_name='The value must unique')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FeatureSpecialTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FeatureSpecialTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FeatureTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FeatureTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FlagshipProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(max_length=200, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('view_video', models.BooleanField(default=False, verbose_name='View video')),
('orientazion', models.CharField(choices=[('R', 'Right'), ('L', 'Left')], default='R', max_length=2, verbose_name='Orientazion')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
bases=(codenerix_products.models.CustomQueryMixin, models.Model),
),
migrations.CreateModel(
name='FlagshipProductTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('title', models.CharField(max_length=250, verbose_name='Text alternavive image')),
('description', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='FlagshipProductTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('title', models.CharField(max_length=250, verbose_name='Text alternavive image')),
('description', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='GroupValueAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('name', models.CharField(blank=True, max_length=250, null=True, unique=True, verbose_name='Name')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='GroupValueFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('name', models.CharField(blank=True, max_length=250, null=True, unique=True, verbose_name='Name')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='GroupValueFeatureSpecial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('name', models.CharField(blank=True, max_length=250, null=True, unique=True, verbose_name='Name')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueAttributeTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueAttributeTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueFeatureSpecial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueFeatureSpecialTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueFeatureSpecialTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueFeatureTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='OptionValueFeatureTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('model', models.CharField(blank=True, max_length=250, null=True, verbose_name='Model')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('code', models.CharField(max_length=250, unique=True, verbose_name='Code')),
('price_base', models.FloatField(default=0, verbose_name='Price base')),
('of_sales', models.BooleanField(default=True, verbose_name='Sales')),
('of_purchase', models.BooleanField(default=True, verbose_name='Purchase')),
('force_stock', models.BooleanField(default=True, verbose_name='Force stock')),
('url_video', models.CharField(blank=True, max_length=250, null=True, verbose_name='Url Video')),
('packing_cost', models.FloatField(default=0, verbose_name='Packing cost')),
('weight', models.FloatField(default=0, verbose_name='Weight')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
bases=(codenerix_products.models.CustomQueryMixin, models.Model),
),
migrations.CreateModel(
name='ProductDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('doc_path', codenerix.fields.FileAngularField(max_length=200, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Doc Path')),
('name_file', models.CharField(max_length=254, verbose_name='Name')),
('public', models.BooleanField(default=False, verbose_name='Public')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductDocumentTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductDocumentTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('value', models.CharField(max_length=80, verbose_name='Value')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('offer', models.BooleanField(default=False, verbose_name='Offer')),
('outstanding', models.BooleanField(default=False, verbose_name='Outstanding')),
('most_sold', models.BooleanField(default=False, verbose_name='Most sold')),
('stock_real', models.FloatField(default=0, editable=False, verbose_name='Stock real')),
('stock_lock', models.FloatField(default=0, editable=False, verbose_name='Stock lock')),
('price_base', models.FloatField(default=0, editable=False, verbose_name='Price base')),
('price', models.FloatField(default=0, editable=False, verbose_name='Price')),
('ean13', models.CharField(blank=True, max_length=13, null=True, verbose_name='EAN-13')),
('reviews_value', models.FloatField(default=0, editable=False, verbose_name='Reviews')),
('reviews_count', models.IntegerField(default=0, editable=False, verbose_name='Reviews count')),
('sample', models.BooleanField(default=False, help_text='If this option is checked the product can not be sold', verbose_name='Sample')),
('code', models.CharField(blank=True, help_text='If it is empty, code is equal to code product', max_length=250, null=True, unique=True, verbose_name='Code')),
('price_base_local', models.FloatField(blank=True, help_text='If it is empty, price base is equal to price base of product', null=True, verbose_name='Price base')),
('packing_cost', models.FloatField(blank=True, help_text='If it is empty, packing cost is equal to packing cost of product', null=True, verbose_name='Packing cost')),
('weight', models.FloatField(blank=True, help_text='If it is empty, weight is equal to weight of product', null=True, verbose_name='Weight')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
bases=(codenerix_products.models.CustomQueryMixin, models.Model),
),
migrations.CreateModel(
name='ProductFinalAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('value', models.CharField(max_length=80, verbose_name='Value')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(max_length=200, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('principal', models.BooleanField(default=False, verbose_name='Principal')),
('flagship_product', models.BooleanField(default=False, verbose_name='Flagship product')),
('outstanding', models.BooleanField(default=False, verbose_name='Outstanding')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalImageTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalImageTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalOptionTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalOptionTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('tags', models.TextField(blank=True, null=True, verbose_name='TAGs')),
('description_sample', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Sample description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductFinalTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('tags', models.TextField(blank=True, null=True, verbose_name='TAGs')),
('description_sample', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Sample description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('image', codenerix.fields.ImageAngularField(max_length=200, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('name_file', models.CharField(blank=True, max_length=254, null=True, verbose_name='Name')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('principal', models.BooleanField(default=False, verbose_name='Principal')),
('flagship_product', models.BooleanField(default=False, verbose_name='Flagship product')),
('outstanding', models.BooleanField(default=False, verbose_name='Outstanding')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductImageTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductImageTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('description', models.CharField(max_length=250, verbose_name='Description')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductRelationSold',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('hits', models.SmallIntegerField(blank=True, null=True, verbose_name='Hits')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductTextTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('tags', models.TextField(blank=True, null=True, verbose_name='TAGs')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductTextTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('description_short', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description short')),
('description_long', codenerix.fields.WysiwygAngularField(blank=True, null=True, verbose_name='Description long')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('tags', models.TextField(blank=True, null=True, verbose_name='TAGs')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='ProductUnique',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('value', models.CharField(blank=True, max_length=80, null=True, verbose_name='Value')),
('stock_real', models.FloatField(default=0, verbose_name='Stock real')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='Subcategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('code', models.CharField(blank=True, max_length=250, null=True, unique=True, verbose_name='Code')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('show_menu', models.BooleanField(default=True, verbose_name='Show menu')),
('show_brand', models.BooleanField(default=True, verbose_name='Show brand (for menu)')),
('outstanding', models.BooleanField(default=False, verbose_name='Outstanding')),
('order', models.SmallIntegerField(blank=True, null=True, verbose_name='Order')),
('image', codenerix.fields.ImageAngularField(blank=True, help_text='Se aconseja un tamaño comprendido entre 1200px y 2000px', max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Image')),
('icon', codenerix.fields.ImageAngularField(blank=True, help_text='Se aconseja que sea una imagen superior a 200px transparente y en formato png o svg', max_length=200, null=True, upload_to=codenerix.lib.helpers.upload_path, verbose_name='Icon')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subcategory', to='codenerix_products.Category', verbose_name='Category')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='SubcategoryTextEN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
('subcategory', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='en', to='codenerix_products.Subcategory')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='SubcategoryTextES',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('meta_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Title')),
('meta_description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=160, null=True, verbose_name='Meta Keywords')),
('slug', models.CharField(max_length=250, unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=250, verbose_name='Name')),
('description', codenerix.fields.WysiwygAngularField(verbose_name='Description')),
('subcategory', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='es', to='codenerix_products.Subcategory')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
migrations.CreateModel(
name='TypeTax',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('tax', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)], verbose_name='Tax (%)')),
('name', models.CharField(max_length=250, unique=True, verbose_name='Name')),
('default', models.BooleanField(default=False, verbose_name='Default')),
('recargo_equivalencia', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)], verbose_name='Recargo de equivalencia (%)')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
]
| apache-2.0 |
google-code/android-scripting | python/src/Lib/test/test_signal.py | 52 | 13731 | import unittest
from test import test_support
from contextlib import closing, nested
import gc
import pickle
import select
import signal
import subprocess
import traceback
import sys, os, time, errno
# Signal delivery, fork() and kill are POSIX-only; skip on platforms
# without them.
if sys.platform[:3] in ('win', 'os2') or sys.platform == 'riscos':
    raise test_support.TestSkipped("Can't test signal on %s" % \
        sys.platform)
class HandlerBCalled(Exception):
    """Raised from the SIGUSR1 handler to prove that it actually ran."""
def exit_subprocess():
    """Use os._exit(0) to exit the current subprocess.
    Otherwise, the test catches the SystemExit and continues executing
    in parallel with the original test, so you wind up with an
    exponential number of tests running concurrently.
    """
    # os._exit() terminates immediately, bypassing SystemExit, atexit
    # handlers and the unittest machinery inherited by the forked child.
    os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
    """Call __func(*args, **kwargs) and return its result.

    If the call fails with an EnvironmentError whose errno is EINTR,
    swallow it and return None; any other errno is re-raised.
    """
    try:
        result = __func(*args, **kwargs)
    except EnvironmentError as e:
        if e.errno == errno.EINTR:
            return None
        raise
    return result
class InterProcessSignalTests(unittest.TestCase):
    """End-to-end signal delivery tests.

    test_main() forks a child which installs handlers and asks external
    ``kill`` subprocesses to signal it; the child reports failures back
    to the parent through a pipe (pickled traceback or None).
    """
    MAX_DURATION = 20 # Entire test should last at most 20 sec.
    def setUp(self):
        # Disable the cyclic GC so a collection cannot run inside a signal
        # handler and perturb the timing-sensitive checks below.
        self.using_gc = gc.isenabled()
        gc.disable()
    def tearDown(self):
        # Restore the GC only if it was enabled before setUp().
        if self.using_gc:
            gc.enable()
    def format_frame(self, frame, limit=None):
        # Render a stack trace starting at `frame` for verbose diagnostics.
        return ''.join(traceback.format_stack(frame, limit=limit))
    def handlerA(self, signum, frame):
        # SIGHUP handler: just record that it ran.
        self.a_called = True
        if test_support.verbose:
            print "handlerA invoked from signal %s at:\n%s" % (
                signum, self.format_frame(frame, limit=1))
    def handlerB(self, signum, frame):
        # SIGUSR1 handler: record the call, then raise so the main line of
        # execution observes the signal as an exception.
        self.b_called = True
        if test_support.verbose:
            print "handlerB invoked from signal %s at:\n%s" % (
                signum, self.format_frame(frame, limit=1))
        raise HandlerBCalled(signum, self.format_frame(frame))
    def wait(self, child):
        """Wait for child to finish, ignoring EINTR."""
        while True:
            try:
                child.wait()
                return
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise
    def run_test(self):
        # Install handlers. This function runs in a sub-process, so we
        # don't worry about re-setting the default handlers.
        signal.signal(signal.SIGHUP, self.handlerA)
        signal.signal(signal.SIGUSR1, self.handlerB)
        signal.signal(signal.SIGUSR2, signal.SIG_IGN)
        signal.signal(signal.SIGALRM, signal.default_int_handler)
        # Variables the signals will modify:
        self.a_called = False
        self.b_called = False
        # Let the sub-processes know who to send signals to.
        pid = os.getpid()
        if test_support.verbose:
            print "test runner's pid is", pid
        child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
        if child:
            self.wait(child)
            if not self.a_called:
                time.sleep(1) # Give the signal time to be delivered.
        self.assertTrue(self.a_called)
        self.assertFalse(self.b_called)
        self.a_called = False
        # Make sure the signal isn't delivered while the previous
        # Popen object is being destroyed, because __del__ swallows
        # exceptions.
        del child
        try:
            child = subprocess.Popen(['kill', '-USR1', str(pid)])
            # This wait should be interrupted by the signal's exception.
            self.wait(child)
            time.sleep(1) # Give the signal time to be delivered.
            self.fail('HandlerBCalled exception not thrown')
        except HandlerBCalled:
            self.assertTrue(self.b_called)
            self.assertFalse(self.a_called)
            if test_support.verbose:
                print "HandlerBCalled exception caught"
        # SIGUSR2 is set to SIG_IGN above, so nothing should happen here.
        child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
        if child:
            self.wait(child) # Nothing should happen.
        try:
            signal.alarm(1)
            # The race condition in pause doesn't matter in this case,
            # since alarm is going to raise a KeyboardException, which
            # will skip the call.
            signal.pause()
            # But if another signal arrives before the alarm, pause
            # may return early.
            time.sleep(1)
        except KeyboardInterrupt:
            if test_support.verbose:
                print "KeyboardInterrupt (the alarm() went off)"
        except:
            self.fail("Some other exception woke us from pause: %s" %
                      traceback.format_exc())
        else:
            self.fail("pause returned of its own accord, and the signal"
                      " didn't arrive after another second.")
    def test_main(self):
        # This function spawns a child process to insulate the main
        # test-running process from all the signals. It then
        # communicates with that child process over a pipe and
        # re-raises information about any exceptions the child
        # throws. The real work happens in self.run_test().
        os_done_r, os_done_w = os.pipe()
        with nested(closing(os.fdopen(os_done_r)),
                    closing(os.fdopen(os_done_w, 'w'))) as (done_r, done_w):
            child = os.fork()
            if child == 0:
                # In the child process; run the test and report results
                # through the pipe.
                try:
                    done_r.close()
                    # Have to close done_w again here because
                    # exit_subprocess() will skip the enclosing with block.
                    with closing(done_w):
                        try:
                            self.run_test()
                        except:
                            pickle.dump(traceback.format_exc(), done_w)
                        else:
                            pickle.dump(None, done_w)
                except:
                    print 'Uh oh, raised from pickle.'
                    traceback.print_exc()
                finally:
                    exit_subprocess()
            done_w.close()
            # Block for up to MAX_DURATION seconds for the test to finish.
            r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
            if done_r in r:
                tb = pickle.load(done_r)
                if tb:
                    self.fail(tb)
            else:
                # Child never reported back: kill it and fail the test.
                os.kill(child, signal.SIGKILL)
                self.fail('Test deadlocked after %d seconds.' %
                          self.MAX_DURATION)
class BasicSignalTests(unittest.TestCase):
    """Sanity checks for signal.signal()/signal.getsignal() arguments."""
    def trivial_signal_handler(self, *args):
        """A do-nothing callable usable as a valid signal handler."""
        pass
    def test_out_of_range_signal_number_raises_error(self):
        """Both getsignal() and signal() must reject a bogus signal number."""
        bogus_signum = 4242
        self.assertRaises(ValueError, signal.getsignal, bogus_signum)
        self.assertRaises(ValueError, signal.signal, bogus_signum,
                          self.trivial_signal_handler)
    def test_setting_signal_handler_to_none_raises_error(self):
        """signal() must reject None as a handler."""
        self.assertRaises(TypeError, signal.signal,
                          signal.SIGUSR1, None)
    def test_getsignal(self):
        """getsignal() must reflect the handler most recently installed."""
        previous = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
        self.assertEquals(signal.getsignal(signal.SIGHUP),
                          self.trivial_signal_handler)
        signal.signal(signal.SIGHUP, previous)
        self.assertEquals(signal.getsignal(signal.SIGHUP), previous)
class WakeupSignalTests(unittest.TestCase):
    """Tests for signal.set_wakeup_fd().

    setUp() registers a non-blocking pipe write end as the wakeup fd, so
    an arriving SIGALRM writes a byte and makes select() on the read end
    return promptly instead of blocking for the full timeout.
    """
    TIMEOUT_FULL = 10
    TIMEOUT_HALF = 5
    def test_wakeup_fd_early(self):
        import select
        signal.alarm(1)
        before_time = time.time()
        # We attempt to get a signal during the sleep,
        # before select is called
        time.sleep(self.TIMEOUT_FULL)
        mid_time = time.time()
        self.assert_(mid_time - before_time < self.TIMEOUT_HALF)
        # The byte written to the wakeup fd must make select() return
        # right away instead of waiting TIMEOUT_FULL seconds.
        select.select([self.read], [], [], self.TIMEOUT_FULL)
        after_time = time.time()
        self.assert_(after_time - mid_time < self.TIMEOUT_HALF)
    def test_wakeup_fd_during(self):
        import select
        signal.alarm(1)
        before_time = time.time()
        # We attempt to get a signal during the select call
        self.assertRaises(select.error, select.select,
            [self.read], [], [], self.TIMEOUT_FULL)
        after_time = time.time()
        self.assert_(after_time - before_time < self.TIMEOUT_HALF)
    def setUp(self):
        import fcntl
        # Install a no-op SIGALRM handler and a non-blocking pipe whose
        # write end becomes the process-wide wakeup fd.
        self.alrm = signal.signal(signal.SIGALRM, lambda x,y:None)
        self.read, self.write = os.pipe()
        flags = fcntl.fcntl(self.write, fcntl.F_GETFL, 0)
        flags = flags | os.O_NONBLOCK
        fcntl.fcntl(self.write, fcntl.F_SETFL, flags)
        self.old_wakeup = signal.set_wakeup_fd(self.write)
    def tearDown(self):
        # Restore the previous wakeup fd and SIGALRM handler, close pipe.
        signal.set_wakeup_fd(self.old_wakeup)
        os.close(self.read)
        os.close(self.write)
        signal.signal(signal.SIGALRM, self.alrm)
class SiginterruptTest(unittest.TestCase):
    """Tests for signal.siginterrupt(): whether a blocking read() is
    interrupted with EINTR or transparently restarted when a signal
    arrives."""
    signum = signal.SIGUSR1
    def readpipe_interrupted(self, cb):
        """Fork a child that signals us while we block reading a pipe.

        cb runs in the parent after the handler is installed (used to
        call siginterrupt() before the read). Returns True if the read
        raised EINTR, False if it completed/restarted.
        """
        r, w = os.pipe()
        ppid = os.getpid()
        pid = os.fork()
        # Both parent and child install the no-op handler; only the
        # parent's matters for the read below.
        oldhandler = signal.signal(self.signum, lambda x,y: None)
        cb()
        if pid==0:
            # child code: sleep, kill, sleep. and then exit,
            # which closes the pipe from which the parent process reads
            try:
                time.sleep(0.2)
                os.kill(ppid, self.signum)
                time.sleep(0.2)
            finally:
                exit_subprocess()
        try:
            os.close(w)
            try:
                d=os.read(r, 1)
                return False
            except OSError, err:
                if err.errno != errno.EINTR:
                    raise
                return True
        finally:
            # Always restore the handler and reap the child.
            signal.signal(self.signum, oldhandler)
            os.waitpid(pid, 0)
    def test_without_siginterrupt(self):
        # Default behaviour: the read is interrupted by the signal.
        i=self.readpipe_interrupted(lambda: None)
        self.assertEquals(i, True)
    def test_siginterrupt_on(self):
        i=self.readpipe_interrupted(lambda: signal.siginterrupt(self.signum, 1))
        self.assertEquals(i, True)
    def test_siginterrupt_off(self):
        # siginterrupt(..., 0): the read restarts and completes when the
        # child's exit closes the pipe's write end.
        i=self.readpipe_interrupted(lambda: signal.siginterrupt(self.signum, 0))
        self.assertEquals(i, False)
class ItimerTest(unittest.TestCase):
    """Tests for signal.setitimer()/getitimer() with the REAL, VIRTUAL
    and PROF interval timers."""
    def setUp(self):
        # hndl_called / hndl_count are mutated by the signal handlers below.
        self.hndl_called = False
        self.hndl_count = 0
        self.itimer = None
        self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
    def tearDown(self):
        signal.signal(signal.SIGALRM, self.old_alarm)
        if self.itimer is not None: # test_itimer_exc doesn't change this attr
            # just ensure that itimer is stopped
            signal.setitimer(self.itimer, 0)
    def sig_alrm(self, *args):
        # SIGALRM handler: only record the call.
        self.hndl_called = True
        if test_support.verbose:
            print("SIGALRM handler invoked", args)
    def sig_vtalrm(self, *args):
        # SIGVTALRM handler: stops its own timer on the third call and
        # fails the test if it fires again afterwards.
        self.hndl_called = True
        if self.hndl_count > 3:
            # it shouldn't be here, because it should have been disabled.
            raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
                "timer.")
        elif self.hndl_count == 3:
            # disable ITIMER_VIRTUAL, this function shouldn't be called anymore
            signal.setitimer(signal.ITIMER_VIRTUAL, 0)
            if test_support.verbose:
                print("last SIGVTALRM handler call")
        self.hndl_count += 1
        if test_support.verbose:
            print("SIGVTALRM handler invoked", args)
    def sig_prof(self, *args):
        # SIGPROF handler: record the call and stop the profiling timer.
        self.hndl_called = True
        signal.setitimer(signal.ITIMER_PROF, 0)
        if test_support.verbose:
            print("SIGPROF handler invoked", args)
    def test_itimer_exc(self):
        # XXX I'm assuming -1 is an invalid itimer, but maybe some platform
        # defines it ?
        self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
        # Negative times are treated as zero on some platforms.
        if 0:
            self.assertRaises(signal.ItimerError,
                signal.setitimer, signal.ITIMER_REAL, -1)
    def test_itimer_real(self):
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1.0)
        if test_support.verbose:
            print("\ncall pause()...")
        # pause() returns once SIGALRM is delivered and handled.
        signal.pause()
        self.assertEqual(self.hndl_called, True)
    def test_itimer_virtual(self):
        self.itimer = signal.ITIMER_VIRTUAL
        signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
        signal.setitimer(self.itimer, 0.3, 0.2)
        # Busy-loop to burn virtual (CPU) time until the handler disables
        # the timer; the bound only prevents an infinite loop on failure.
        for i in xrange(100000000):
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_vtalrm handler stopped this itimer
        # virtual itimer should be (0.0, 0.0) now
        self.assertEquals(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEquals(self.hndl_called, True)
    def test_itimer_prof(self):
        self.itimer = signal.ITIMER_PROF
        signal.signal(signal.SIGPROF, self.sig_prof)
        signal.setitimer(self.itimer, 0.2, 0.2)
        for i in xrange(100000000):
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_prof handler stopped this itimer
        # profiling itimer should be (0.0, 0.0) now
        self.assertEquals(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)
def test_main():
    """Entry point used by regrtest: run every signal test case."""
    test_cases = (BasicSignalTests, InterProcessSignalTests,
                  WakeupSignalTests, SiginterruptTest, ItimerTest)
    test_support.run_unittest(*test_cases)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| apache-2.0 |
mastercoin/mastercoin-source | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Input image and output animation path.
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
# Where intermediate per-frame PNGs are written (tmp-000.png, tmp-001.png, ...).
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
# Animation parameters: frame count and imagemagick "-delay" value.
NUMFRAMES=35
FRAMERATE=10.0
# External imagemagick binary used to assemble the frames into the .mng.
CONVERT='convert'
CLOCKWISE=True
# Final frame size in pixels.
DSIZE=(16,16)
im_src = Image.open(SRC)
# Mirror the source (and negate the rotation below) to spin clockwise,
# since a positive PIL rotation is counter-clockwise.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    """Return the temporary PNG path used for the given frame index."""
    filename = TMPNAME % frame
    return path.join(TMPDIR, filename)
frame_files = []
for frame in xrange(NUMFRAMES):
    # Sample the rotation at the frame's midpoint for an even spin.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
# Assemble all frames into the .mng with imagemagick.
# NOTE(review): "-dispose 2" presumably restores the background between
# frames -- confirm against the imagemagick docs if changing.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
waynr/jenkins-job-builder | jenkins_jobs/sphinx/yaml.py | 42 | 4925 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Most of this code originated in sphinx.domains.python and
# sphinx.ext.autodoc and has been only slightly adapted for use in
# subclasses here.
# :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
# :license: BSD, see LICENSE for details.
import re
from sphinx.ext.autodoc import Documenter, FunctionDocumenter
from sphinx.domains.python import PyModulelevel, _pseudo_parse_arglist
from sphinx import addnodes
from sphinx.locale import _
yaml_sig_re = re.compile('yaml:\s*(.*)')
class PyYAMLFunction(PyModulelevel):
    """'py' domain directive that treats the whole signature string as the
    object name (no Python argument-list parsing) -- adapted from
    sphinx.domains.python (see header note)."""
    def handle_signature(self, sig, signode):
        """Transform a Python signature into RST nodes.
        Return (fully qualified name of the thing, classname if any).
        If inside a class, the current class name is handled intelligently:
        * it is stripped from the displayed name if present
        * it is added to the full name (return value) if not present
        """
        # Unlike the sphinx original, the signature is not split: the raw
        # string is the name, and arglist/retann stay None.
        name_prefix = None
        name = sig
        arglist = None
        retann = None
        # determine module and class name (if applicable), as well as full name
        modname = self.options.get(
            'module', self.env.temp_data.get('py:module'))
        classname = self.env.temp_data.get('py:class')
        fullname = name
        signode['module'] = modname
        signode['class'] = classname
        signode['fullname'] = fullname
        sig_prefix = self.get_signature_prefix(sig)
        if sig_prefix:
            signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
        if name_prefix:
            signode += addnodes.desc_addname(name_prefix, name_prefix)
        anno = self.options.get('annotation')
        signode += addnodes.desc_name(name, name)
        if not arglist:
            if self.needs_arglist():
                # for callables, add an empty parameter list
                signode += addnodes.desc_parameterlist()
            if retann:
                signode += addnodes.desc_returns(retann, retann)
            if anno:
                signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
            return fullname, name_prefix
        # NOTE(review): arglist is always None above, so this tail is never
        # reached here; kept verbatim from the sphinx code it was adapted from.
        _pseudo_parse_arglist(signode, arglist)
        if retann:
            signode += addnodes.desc_returns(retann, retann)
        if anno:
            signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
        return fullname, name_prefix
    def get_index_text(self, modname, name_cls):
        # Index entry text for the documented object.
        return _('%s (in module %s)') % (name_cls[0], modname)
class YAMLFunctionDocumenter(FunctionDocumenter):
    """Autodoc documenter for functions whose docstring starts with a
    "yaml: <name>" line; they are documented under that YAML name with the
    signature line stripped from the docstring."""
    # Higher priority so this wins over the plain FunctionDocumenter for
    # matching members.
    priority = FunctionDocumenter.priority + 10
    objtype = 'yamlfunction'
    directivetype = 'yamlfunction'
    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        # Only ordinary functions whose docstring begins with "yaml:" qualify.
        if not FunctionDocumenter.can_document_member(member, membername,
                                                      isattr, parent):
            return False
        if member.__doc__ is not None and yaml_sig_re.match(member.__doc__):
            return True
        return False
    def _find_signature(self, encoding=None):
        """Extract the YAML name from the docstring's first line and stash
        the remaining lines under the '__new_doclines' attribute."""
        docstrings = Documenter.get_doc(self, encoding, 2)
        if len(docstrings) != 1:
            return
        doclines = docstrings[0]
        setattr(self, '__new_doclines', doclines)
        if not doclines:
            return
        # match first line of docstring against signature RE
        match = yaml_sig_re.match(doclines[0])
        if not match:
            return
        name = match.group(1)
        # ok, now jump over remaining empty lines and set the remaining
        # lines as the new doclines
        i = 1
        while i < len(doclines) and not doclines[i].strip():
            i += 1
        setattr(self, '__new_doclines', doclines[i:])
        return name
    def get_doc(self, encoding=None, ignore=1):
        # Serve the docstring with the "yaml:" signature line removed.
        lines = getattr(self, '__new_doclines', None)
        if lines is not None:
            return [lines]
        return Documenter.get_doc(self, encoding, ignore)
    def format_signature(self):
        # Extracting the name is a side effect; the displayed signature is empty.
        result = self._find_signature()
        self._name = result
        return ''
    def format_name(self):
        # Use the YAML name captured by format_signature().
        return self._name
def setup(app):
    """Sphinx extension entry point: register the YAML autodoc documenter
    and the 'py:yamlfunction' directive."""
    app.add_autodocumenter(YAMLFunctionDocumenter)
    app.add_directive_to_domain('py', 'yamlfunction', PyYAMLFunction)
| apache-2.0 |
kingmotley/SickRage | lib/hachoir_parser/archive/zip.py | 72 | 17756 | """
Zip splitter.
Status: can read most important headers
Authors: Christophe Gisquet and Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, Enum,
TimeDateMSDOS32, SubFile,
UInt8, UInt16, UInt32, UInt64,
String, PascalString16,
RawBytes)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.tools import makeUnicode
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.deflate import Deflate
# Upper bound (bytes) used as a sanity limit when validating archives.
MAX_FILESIZE = 1000 * 1024 * 1024
# Method id for Deflate, the only compression handled specially (see FileEntry).
COMPRESSION_DEFLATE = 8
# ZIP compression method ids -> human-readable names (per PKWARE APPNOTE).
COMPRESSION_METHOD = {
    0: u"no compression",
    1: u"Shrunk",
    2: u"Reduced (factor 1)",
    3: u"Reduced (factor 2)",
    4: u"Reduced (factor 3)",
    5: u"Reduced (factor 4)",
    6: u"Imploded",
    7: u"Tokenizing",
    8: u"Deflate",
    9: u"Deflate64",
    10: u"PKWARE Imploding",
    11: u"Reserved by PKWARE",
    12: u"File is compressed using BZIP2 algorithm",
    13: u"Reserved by PKWARE",
    14: u"LZMA (EFS)",
    15: u"Reserved by PKWARE",
    16: u"Reserved by PKWARE",
    17: u"Reserved by PKWARE",
    18: u"File is compressed using IBM TERSE (new)",
    19: u"IBM LZ77 z Architecture (PFS)",
    98: u"PPMd version I, Rev 1",
}
def ZipRevision(field):
    """Format a ZIP version byte as "major.minor" (stored as major*10+minor)."""
    major, minor = divmod(field.value, 10)
    return "%u.%u" % (major, minor)
class ZipVersion(FieldSet):
    """Two-byte version field: ZIP spec revision plus the creating host OS."""
    static_size = 16  # bits
    # Host OS byte values -> file system / platform names.
    HOST_OS = {
        0: u"FAT file system (DOS, OS/2, NT)",
        1: u"Amiga",
        2: u"VMS (VAX or Alpha AXP)",
        3: u"Unix",
        4: u"VM/CMS",
        5: u"Atari",
        6: u"HPFS file system (OS/2, NT 3.x)",
        7: u"Macintosh",
        8: u"Z-System",
        9: u"CP/M",
        10: u"TOPS-20",
        11: u"NTFS file system (NT)",
        12: u"SMS/QDOS",
        13: u"Acorn RISC OS",
        14: u"VFAT file system (Win95, NT)",
        15: u"MVS",
        16: u"BeOS (BeBox or PowerMac)",
        17: u"Tandem",
    }
    def createFields(self):
        # NOTE: yield order defines the on-disk field layout.
        yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision)
        yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS)
class ZipGeneralFlags(FieldSet):
    """General purpose bit flags; bits 1-2 are method-specific, so the
    compression method is peeked from the two bytes following this field."""
    static_size = 16  # bits
    def createFields(self):
        # Need the compression info from the parent, and that is the byte following
        method = self.stream.readBits(self.absolute_address+16, 16, LITTLE_ENDIAN)
        yield Bit(self, "is_encrypted", "File is encrypted?")
        if method == 6:
            # Implode: dictionary size and tree count flags.
            yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)")
            yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)")
        elif method in (8, 9):
            # Deflate/Deflate64: two-bit compression strength.
            NAME = {
                0: "Normal compression",
                1: "Maximum compression",
                2: "Fast compression",
                3: "Super Fast compression"
            }
            yield Enum(Bits(self, "method", 2), NAME)
        elif method == 14: #LZMA
            yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker")
            yield Bit(self, "unused[]")
        else:
            yield Bits(self, "compression_info", 2)
        yield Bit(self, "has_descriptor",
            "Compressed data followed by descriptor?")
        yield Bit(self, "enhanced_deflate", "Reserved for use with method 8")
        yield Bit(self, "is_patched", "File is compressed with patched data?")
        yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)")
        yield Bits(self, "unused[]", 4, "Unused")
        yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8")
        yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.")
        yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked")
        yield Bits(self, "unused[]", 2, "Unused")
class ExtraField(FieldSet):
    """A single extra field: 16-bit id, 16-bit length, raw payload."""
    # Known extra field ids -> originating tool/platform.
    EXTRA_FIELD_ID = {
        0x0007: "AV Info",
        0x0009: "OS/2 extended attributes (also Info-ZIP)",
        0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented!
        0x000c: "PKWARE VAX/VMS (also Info-ZIP)",
        0x000d: "PKWARE Unix",
        0x000f: "Patch Descriptor",
        0x07c8: "Info-ZIP Macintosh (old, J. Lee)",
        0x2605: "ZipIt Macintosh (first version)",
        0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)",
        0x334d: "Info-ZIP Macintosh (new, D. Haase Mac3 field)",
        0x4341: "Acorn/SparkFS (David Pilling)",
        0x4453: "Windows NT security descriptor (binary ACL)",
        0x4704: "VM/CMS",
        0x470f: "MVS",
        0x4b46: "FWKCS MD5 (third party, see below)",
        0x4c41: "OS/2 access control list (text ACL)",
        0x4d49: "Info-ZIP VMS (VAX or Alpha)",
        0x5356: "AOS/VS (binary ACL)",
        0x5455: "extended timestamp",
        0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)",
        0x6542: "BeOS (BeBox, PowerMac, etc.)",
        0x756e: "ASi Unix",
        0x7855: "Info-ZIP Unix (new)",
        0xfb4a: "SMS/QDOS",
    }
    def createFields(self):
        yield Enum(UInt16(self, "field_id", "Extra field ID"),
            self.EXTRA_FIELD_ID)
        size = UInt16(self, "field_data_size", "Extra field data size")
        yield size
        # Payload is kept opaque; per-id decoding is not implemented.
        if size.value > 0:
            yield RawBytes(self, "field_data", size.value, "Unknown field data")
class ExtraFields(FieldSet):
    """Sequence of ExtraField entries filling the declared extra-data size."""
    def createFields(self):
        while self.current_size < self.size:
            yield ExtraField(self, "extra[]")
def ZipStartCommonFields(self):
    """Yield the header fields shared by local file headers and central
    directory records (yield order defines the on-disk layout)."""
    yield ZipVersion(self, "version_needed", "Version needed")
    yield ZipGeneralFlags(self, "flags", "General purpose flag")
    yield Enum(UInt16(self, "compression", "Compression method"),
        COMPRESSION_METHOD)
    yield TimeDateMSDOS32(self, "last_mod", "Last modification file time")
    yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal)
    yield UInt32(self, "compressed_size", "Compressed size")
    yield UInt32(self, "uncompressed_size", "Uncompressed size")
    yield UInt16(self, "filename_length", "Filename length")
    yield UInt16(self, "extra_length", "Extra fields length")
def zipGetCharset(self):
    """Return the charset of this entry's filename/comment: UTF-8 when the
    general-purpose unicode flag is set, ISO-8859-15 otherwise."""
    return "UTF-8" if self["flags/uses_unicode"].value else "ISO-8859-15"
class ZipCentralDirectory(FieldSet):
    """Central directory record (0x02014b50) describing one archived file."""
    HEADER = 0x02014b50
    def createFields(self):
        yield ZipVersion(self, "version_made_by", "Version made by")
        for field in ZipStartCommonFields(self):
            yield field
        # Check unicode status
        charset = zipGetCharset(self)
        yield UInt16(self, "comment_length", "Comment length")
        yield UInt16(self, "disk_number_start", "Disk number start")
        yield UInt16(self, "internal_attr", "Internal file attributes")
        yield UInt32(self, "external_attr", "External file attributes")
        yield UInt32(self, "offset_header", "Relative offset of local header")
        yield String(self, "filename", self["filename_length"].value,
            "Filename", charset=charset)
        # Extra fields and comment are optional (zero length means absent).
        if 0 < self["extra_length"].value:
            yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
                description="Extra fields")
        if 0 < self["comment_length"].value:
            yield String(self, "comment", self["comment_length"].value,
                "Comment", charset=charset)
    def createDescription(self):
        # Human-readable label shown in the field tree.
        return "Central directory: %s" % self["filename"].display
class Zip64EndCentralDirectory(FieldSet):
    """zip64 end-of-central-directory record (0x06064b50) with 64-bit
    entry counts, sizes and offsets."""
    HEADER = 0x06064b50
    def createFields(self):
        yield UInt64(self, "zip64_end_size",
            "Size of zip64 end of central directory record")
        yield ZipVersion(self, "version_made_by", "Version made by")
        yield ZipVersion(self, "version_needed", "Version needed to extract")
        yield UInt32(self, "number_disk", "Number of this disk")
        yield UInt32(self, "number_disk2",
            "Number of the disk with the start of the central directory")
        yield UInt64(self, "number_entries",
            "Total number of entries in the central directory on this disk")
        yield UInt64(self, "number_entries2",
            "Total number of entries in the central directory")
        yield UInt64(self, "size", "Size of the central directory")
        yield UInt64(self, "offset", "Offset of start of central directory")
        # Optional extensible data sector follows when a size is declared.
        if 0 < self["zip64_end_size"].value:
            yield RawBytes(self, "data_sector", self["zip64_end_size"].value,
                "zip64 extensible data sector")
class ZipEndCentralDirectory(FieldSet):
    """Classic end-of-central-directory record (0x06054b50) closing the
    archive, including the optional ZIP comment."""
    HEADER = 0x06054b50
    def createFields(self):
        yield UInt16(self, "number_disk", "Number of this disk")
        yield UInt16(self, "number_disk2", "Number in the central dir")
        yield UInt16(self, "total_number_disk",
            "Total number of entries in this disk")
        yield UInt16(self, "total_number_disk2",
            "Total number of entries in the central dir")
        yield UInt32(self, "size", "Size of the central directory")
        yield UInt32(self, "offset", "Offset of start of central directory")
        yield PascalString16(self, "comment", "ZIP comment")
class ZipDataDescriptor(FieldSet):
    """Data descriptor (0x08074b50) carrying CRC and sizes when they were
    not known at header time (streamed output)."""
    HEADER_STRING = "\x50\x4B\x07\x08"
    HEADER = 0x08074B50
    static_size = 96  # bits: three 32-bit values
    def createFields(self):
        yield textHandler(UInt32(self, "file_crc32",
            "Checksum (CRC32)"), hexadecimal)
        yield filesizeHandler(UInt32(self, "file_compressed_size",
            "Compressed size (bytes)"))
        yield filesizeHandler(UInt32(self, "file_uncompressed_size",
            "Uncompressed size (bytes)"))
class FileEntry(FieldSet):
    """Local file header (0x04034b50) followed by the file data, with
    support for resynchronizing on streamed archives."""
    HEADER = 0x04034B50
    # Filename parsed from the header; used to label the extracted sub-file.
    filename = None
    def data(self, size):
        """Return the data field: raw SubFile, or wrapped in a Deflate
        filter when the entry uses Deflate compression."""
        compression = self["compression"].value
        if compression == 0:
            return SubFile(self, "data", size, filename=self.filename)
        compressed = SubFile(self, "compressed_data", size, filename=self.filename)
        if compression == COMPRESSION_DEFLATE:
            return Deflate(compressed)
        else:
            return compressed
    def resync(self):
        # Non-seekable output, search the next data descriptor
        size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False,
            self.absolute_address+self.current_size)
        if size <= 0:
            raise ParserError("Couldn't resync to %s" %
                ZipDataDescriptor.HEADER_STRING)
        yield self.data(size)
        yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
        data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor")
        #self.info("Resynced!")
        yield data_desc
        # The above could be checked anytime, but we prefer trying parsing
        # than aborting
        if self["crc32"].value == 0 and \
            data_desc["file_compressed_size"].value != size:
            raise ParserError("Bad resync: position=>%i but data_desc=>%i" %
                (size, data_desc["file_compressed_size"].value))
    def createFields(self):
        for field in ZipStartCommonFields(self):
            yield field
        length = self["filename_length"].value
        if length:
            filename = String(self, "filename", length, "Filename",
                charset=zipGetCharset(self))
            yield filename
            self.filename = filename.value
        if self["extra_length"].value:
            yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
                description="Extra fields")
        size = self["compressed_size"].value
        if size > 0:
            yield self.data(size)
        elif self["flags/incomplete"].value:
            # Streamed entry with unknown size: scan for the descriptor.
            for field in self.resync():
                yield field
        if self["flags/has_descriptor"].value and self['crc32'].value == 0:
            yield ZipDataDescriptor(self, "data_desc", "Data descriptor")
    def createDescription(self):
        return "File entry: %s (%s)" % \
            (self["filename"].value, self["compressed_size"].display)
    def validate(self):
        """Return an error message string, or "" when the entry looks sane."""
        if self["compression"].value not in COMPRESSION_METHOD:
            return "Unknown compression method (%u)" % self["compression"].value
        return ""
class ZipSignature(FieldSet):
    """Digital signature record (signature 0x05054B50): one
    length-prefixed signature blob.
    """
    HEADER = 0x05054B50
    def createFields(self):
        yield PascalString16(self, "signature", "Signature")
class Zip64EndCentralDirectoryLocator(FieldSet):
    """ZIP64 end-of-central-directory locator (signature 0x07064B50):
    points at the ZIP64 end-of-central-directory record.
    """
    HEADER = 0x07064b50
    def createFields(self):
        yield UInt32(self, "disk_number", \
                     "Number of the disk with the start of the zip64 end of central directory")
        yield UInt64(self, "relative_offset", \
                     "Relative offset of the zip64 end of central directory record")
        yield UInt32(self, "disk_total_number", "Total number of disks")
class ZipFile(Parser):
    """Parser for ZIP archives and ZIP-based containers (JAR, OpenOffice,
    OpenDocument).

    The archive is walked as a flat sequence of records, dispatching on
    each 4-byte little-endian record signature.
    """
    endian = LITTLE_ENDIAN
    # Maps the MIME type stored in the first "mimetype" member (ODF/JAR
    # convention) to a filename suffix; see createFilenameSuffix().
    MIME_TYPES = {
        # Default ZIP archive
        u"application/zip": "zip",
        u"application/x-zip": "zip",
        # Java archive (JAR)
        u"application/x-jar": "jar",
        u"application/java-archive": "jar",
        # OpenOffice 1.0
        u"application/vnd.sun.xml.calc": "sxc",
        u"application/vnd.sun.xml.draw": "sxd",
        u"application/vnd.sun.xml.impress": "sxi",
        u"application/vnd.sun.xml.writer": "sxw",
        u"application/vnd.sun.xml.math": "sxm",
        # OpenOffice 1.0 (template)
        u"application/vnd.sun.xml.calc.template": "stc",
        u"application/vnd.sun.xml.draw.template": "std",
        u"application/vnd.sun.xml.impress.template": "sti",
        u"application/vnd.sun.xml.writer.template": "stw",
        u"application/vnd.sun.xml.writer.global": "sxg",
        # OpenDocument
        u"application/vnd.oasis.opendocument.chart": "odc",
        u"application/vnd.oasis.opendocument.image": "odi",
        u"application/vnd.oasis.opendocument.database": "odb",
        u"application/vnd.oasis.opendocument.formula": "odf",
        u"application/vnd.oasis.opendocument.graphics": "odg",
        u"application/vnd.oasis.opendocument.presentation": "odp",
        u"application/vnd.oasis.opendocument.spreadsheet": "ods",
        u"application/vnd.oasis.opendocument.text": "odt",
        u"application/vnd.oasis.opendocument.text-master": "odm",
        # OpenDocument (template)
        u"application/vnd.oasis.opendocument.graphics-template": "otg",
        u"application/vnd.oasis.opendocument.presentation-template": "otp",
        u"application/vnd.oasis.opendocument.spreadsheet-template": "ots",
        u"application/vnd.oasis.opendocument.text-template": "ott",
    }
    PARSER_TAGS = {
        "id": "zip",
        "category": "archive",
        "file_ext": tuple(MIME_TYPES.itervalues()),
        "mime": tuple(MIME_TYPES.iterkeys()),
        "magic": (("PK\3\4", 0),),
        "subfile": "skip",
        "min_size": (4 + 26)*8, # header + file entry
        "description": "ZIP archive"
    }
    def validate(self):
        """Cheap validation: check the magic then parse/validate file #0.

        Returns True on success, or an error message string on failure
        (hachoir Parser convention).
        """
        if self["header[0]"].value != FileEntry.HEADER:
            return "Invalid magic"
        try:
            file0 = self["file[0]"]
        except HACHOIR_ERRORS:
            # Note: the caught exception was previously bound to an unused
            # name; any parse error here simply means the entry is broken.
            return "Unable to get file #0"
        err = file0.validate()
        if err:
            return "File #0: %s" % err
        return True
    def createFields(self):
        """Walk the archive record by record until end of stream.

        Raises ParserError on an unknown record signature.
        """
        # File data
        self.signature = None
        self.central_directory = []
        while not self.eof:
            header = textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
            yield header
            header = header.value
            if header == FileEntry.HEADER:
                yield FileEntry(self, "file[]")
            elif header == ZipDataDescriptor.HEADER:
                yield ZipDataDescriptor(self, "spanning[]")
            elif header == 0x30304b50:
                # "PK00": temporary spanning/split-archive marker
                yield ZipDataDescriptor(self, "temporary_spanning[]")
            elif header == ZipCentralDirectory.HEADER:
                yield ZipCentralDirectory(self, "central_directory[]")
            elif header == ZipEndCentralDirectory.HEADER:
                yield ZipEndCentralDirectory(self, "end_central_directory", "End of central directory")
            elif header == Zip64EndCentralDirectory.HEADER:
                yield Zip64EndCentralDirectory(self, "end64_central_directory", "ZIP64 end of central directory")
            elif header == ZipSignature.HEADER:
                yield ZipSignature(self, "signature", "Signature")
            elif header == Zip64EndCentralDirectoryLocator.HEADER:
                # Fixed typo in the description: was "Enf of central directory"
                yield Zip64EndCentralDirectoryLocator(self, "end_locator", "ZIP64 End of central directory locator")
            else:
                raise ParserError("Error, unknown ZIP header (0x%08X)." % header)
    def createMimeType(self):
        """ODF/JAR convention: the first member "mimetype" stores the type."""
        if self["file[0]/filename"].value == "mimetype":
            return makeUnicode(self["file[0]/data"].value)
        else:
            return u"application/zip"
    def createFilenameSuffix(self):
        """Derive a suffix from the embedded mimetype, defaulting to .zip.

        NOTE(review): this reads "compressed_data" while createMimeType()
        reads "data"; a stored (uncompressed) mimetype member is exposed as
        "data", so this branch may raise for such archives -- confirm.
        """
        if self["file[0]/filename"].value == "mimetype":
            mime = self["file[0]/compressed_data"].value
            if mime in self.MIME_TYPES:
                return "." + self.MIME_TYPES[mime]
        return ".zip"
    def createContentSize(self):
        """Locate the end-of-central-directory record to bound the file.

        22 bytes is the minimal EOCD record size (a trailing archive
        comment, if any, is not accounted for).
        """
        start = 0
        end = MAX_FILESIZE * 8
        end = self.stream.searchBytes("PK\5\6", start, end)
        if end is not None:
            return end + 22*8
        return None
| gpl-3.0 |
stopstalk/stopstalk-deployment | modules/dashboard_cards.py | 1 | 35586 | """
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import utilities
import datetime
from stopstalk_constants import *
from gluon import current, IMG, DIV, TABLE, THEAD, HR, H5, B, \
TBODY, TR, TH, TD, A, SPAN, INPUT, I, P, FORM, \
TEXTAREA, SELECT, OPTION, URL, BUTTON, TAG, BR
# ==============================================================================
class BaseCard:
    """Base class for all dashboard cards.

    Subclasses set self.card_title, self.cache_key and self.ctas, implement
    should_show()/get_data()/get_html(), and call BaseCard.__init__ last.
    The usual call order is should_show() first (it often populates state
    that get_data()/get_html() read), then get_html().
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        # genre: the concrete card class name, used for analytics/telemetry
        self.genre = self.__class__.__name__
        self.user_id = user_id
        # How cached payloads are (de)serialized: "json" or "str" (eval)
        self.cache_serializer = "json"
        # When set, the card stops showing after this date (see enabled_check)
        self.sunset_card_date = None
    # --------------------------------------------------------------------------
    def get_html(self, **args):
        """Assemble the Materialize card markup shared by every card.

        Expected keys: card_title, card_content, cta_links (list),
        card_color_class, card_text_color_class, optional card_style.
        """
        if len(args["cta_links"]) > 0:
            actions_div = DIV(*args["cta_links"], _class="card-action")
        else:
            actions_div = ""
        if "card_style" not in args:
            args["card_style"] = ""
        return DIV(DIV(DIV(SPAN(args["card_title"], _class="card-title"),
                           args["card_content"],
                           _class="card-content " + \
                                  args["card_text_color_class"]),
                       actions_div,
                       _class="card stopstalk-dashboard-card " + \
                              args["card_color_class"],
                       _style=args["card_style"]),
                   _class="col s4")
    # --------------------------------------------------------------------------
    def get_data(self):
        """Subclass hook: return the card's payload (default: nothing)."""
        pass
    # --------------------------------------------------------------------------
    def get_cta_html(self):
        """Turn self.ctas (list of dicts) into anchor buttons for the card."""
        cta_buttons = []
        for cta in self.ctas:
            cta_buttons.append(
                A(cta["btn_text"],
                  _href=cta["btn_url"],
                  _class="btn btn-default stopstalk-dashboard-card-cta " + \
                         cta["btn_class"],
                  _target="_blank")
            )
        return cta_buttons
    # --------------------------------------------------------------------------
    def get_from_cache(self):
        """Read and deserialize this card's cached payload (None if absent)."""
        value = current.REDIS_CLIENT.get(self.cache_key)
        if self.cache_serializer == "json":
            return json.loads(value) if value else None
        else:
            # NOTE(review): eval() of a Redis value executes arbitrary code if
            # the cache can ever be written by untrusted parties; values here
            # are produced by set_to_cache() below, but json would be safer.
            return eval(value) if value else None
    # --------------------------------------------------------------------------
    def set_to_cache(self, value):
        """Serialize and store the payload with a one-hour expiry."""
        result = json.dumps(value) if self.cache_serializer == "json" else str(value)
        current.REDIS_CLIENT.set(self.cache_key,
                                 result,
                                 ex=ONE_HOUR)
    # --------------------------------------------------------------------------
    @staticmethod
    def enabled_check(func):
        # Decorator for should_show(): short-circuits to False once the
        # card's sunset_card_date (if any) has passed.
        def wrapper(*args):
            self_obj = args[0]
            return (self_obj.sunset_card_date is None or \
                    (self_obj.sunset_card_date - datetime.datetime.now()).days > 0) and \
                   func(*args)
        return wrapper
# ==============================================================================
class StreakCard(BaseCard):
    """Card showing the user's current streak.

    kind is "day" (a new problem solved every day) or "accepted"
    (consecutive accepted submissions).
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id, kind):
        self.kind = kind
        # Key into the rating-information dict, e.g. "curr_day_streak"
        self.key_name = "curr_%s_streak" % self.kind
        self.user_id = user_id
        self.card_title = "Keep your %s streak going!" % self.kind
        self.cache_key = CARD_CACHE_REDIS_KEYS["curr_streak_prefix"] + str(self.user_id)
        # Populated by should_show(); get_data() must not be called before it
        self.stats = None
        self.ctas = [
            dict(btn_url=URL("default",
                             "cta_handler",
                             vars=dict(kind="random")),
                 btn_text="Pick a Problem",
                 btn_class=self.kind + "-streak-card-pick-problem")
        ]
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the card; returns the string "FAILURE" for an unknown kind."""
        streak_value = self.get_data()
        if self.kind == "day":
            card_content = P("You're at a ",
                             B("%d day streak" % streak_value),
                             ". Keep solving a new problem everyday!")
        elif self.kind == "accepted":
            card_content = P("You're at a ",
                             B("%d accepted problem streak" % streak_value),
                             ".",
                             BR(),
                             "Let the greens rain!")
        else:
            return "FAILURE"
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        # Relies on should_show() having filled self.stats
        return self.stats[self.key_name]
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Load (or compute and cache) streak stats; show when streak > 0."""
        cache_value = self.get_from_cache()
        if cache_value:
            self.stats = cache_value
        else:
            self.stats = utilities.get_rating_information(self.user_id,
                                                          False,
                                                          True)
            self.set_to_cache(self.stats)
        return self.key_name in self.stats and \
               self.stats[self.key_name] > 0
# ==============================================================================
class SuggestProblemCard(BaseCard):
    """Card letting the user pick a problem to solve by difficulty ("mood").

    The card is purely static: its three CTAs route to the cta_handler with
    a difficulty bucket, so no per-user data is fetched.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.card_title = "Mood"
        # One CTA per difficulty bucket
        self.ctas = [
            dict(btn_text="Easy",
                 btn_url=URL("default", "cta_handler",
                             vars=dict(kind="suggested_tag",
                                       tag_category="Easy")),
                 btn_class="suggest-problem-card-easy"),
            dict(btn_text="Medium",
                 btn_url=URL("default", "cta_handler",
                             vars=dict(kind="suggested_tag",
                                       tag_category="Medium")),
                 btn_class="suggest-problem-card-medium"),
            dict(btn_text="Hard",
                 btn_url=URL("default", "cta_handler",
                             vars=dict(kind="suggested_tag",
                                       tag_category="Hard")),
                 btn_class="suggest-problem-card-hard")
        ]
        BaseCard.__init__(self, self.user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the card.

        The previous dead call to self.get_data() (which returns None and
        whose result was unused) has been removed.
        """
        card_content = P("Let's find you some problem that you can start solving.")
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        # Nothing to compute for this card
        return
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        # Always eligible (subject only to the sunset check in enabled_check)
        return True
# ==============================================================================
class UpcomingContestCard(BaseCard):
    """Card listing the next two upcoming competitive-programming contests.

    The contest list is global (not per-user), so the cache key is shared.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.card_title = "Upcoming contests"
        self.cache_key = CARD_CACHE_REDIS_KEYS["upcoming_contests"]
        self.ctas = [
            dict(btn_text="View all",
                 btn_url=URL("default", "contests"),
                 btn_class="upcoming-contests-card-view-all")
        ]
        BaseCard.__init__(self, self.user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render a table of contests with site icon, link and reminder button."""
        contest_data = self.get_data()
        card_content_table = TABLE(
            _class="bordered centered highlight",
            _style="line-height: 20px"
        )
        tbody = TBODY()
        for contest in contest_data:
            # Timestamps arrive as UTC ISO strings with a fixed .000Z suffix
            start_time = datetime.datetime.strptime(contest["start_time"], "%Y-%m-%dT%H:%M:%S.000Z")
            end_time = datetime.datetime.strptime(contest["end_time"], "%Y-%m-%dT%H:%M:%S.000Z")
            # 330 minutes = UTC+5:30 (IST); display times are hard-coded to IST
            start_time += datetime.timedelta(minutes=330)
            end_time += datetime.timedelta(minutes=330)
            contest["start_time"] = start_time
            contest["end_time"] = end_time
            tbody.append(TR(TD(contest["name"]),
                            TD(IMG(_src=current.get_static_url(
                                     "images/%s_small.png" % str(contest["site"].lower())
                                   ),
                                   _class="parent-site-icon-small")),
                            TD(A(I(_class="fa fa-external-link-square"),
                                 _class="btn-floating btn-small accent-4 green view-contest",
                                 _href=contest["url"],
                                 _target="_blank")),
                            TD(utilities.get_reminder_button(contest))))
        card_content_table.append(tbody)
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content_table,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        """Return at most two upcoming contests on supported sites (cached)."""
        value = self.get_from_cache()
        if value:
            return value
        contest_list = utilities.get_contests()
        data = []
        for contest in contest_list:
            if contest["site"] not in CONTESTS_SITE_MAPPING:
                continue
            # Skip contests that are already running
            if contest["status"] == "CODING":
                continue
            # Python 2: drop non-ASCII characters from the contest name
            contest["name"] = contest["name"].encode("ascii", "ignore")
            data.append(contest)
            # The card only shows the next two contests
            if len(data) == 2:
                break
        self.set_to_cache(data)
        return data
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        return True
# ==============================================================================
class RecentSubmissionsCard(BaseCard):
    """Card summarizing the user's friends' submissions over the last week.

    should_show() computes (and caches) the per-friend per-site submission
    counts; get_html()/get_data() read the result, so should_show() must be
    called first.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.card_title = "Recent Friends' submissions"
        self.cache_key = CARD_CACHE_REDIS_KEYS["recent_submissions_prefix"] + str(user_id)
        # Populated by should_show(): list of (friend_id, {site: count, "total": n})
        self.final_data = None
        self.ctas = [
            dict(btn_text="View all",
                 btn_url=URL("default", "submissions", args=[1]),
                 btn_class="recent-submissions-card-view-all")
        ]
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render one row per friend: profile link plus per-site counts."""
        submissions_data = self.get_data()
        card_content_table = TABLE(
            _class="bordered highlight"
        )
        tbody = TBODY()
        for row in submissions_data:
            user_record = utilities.get_user_records([row[0]], "id", "id", True)
            tr = TR(TD(A(user_record.first_name + " " + user_record.last_name,
                         _href=URL("user", "profile",
                                   args=user_record.stopstalk_handle,
                                   extension=False),
                         _target="_blank")))
            td = TD()
            for site in row[1]:
                # "total" is an aggregate key, not a site
                if site == "total":
                    continue
                else:
                    td.append(SPAN(IMG(_src=current.get_static_url(
                                         "images/%s_small.png" % str(site).lower()
                                       ),
                                       _class="parent-site-icon-very-small"),
                                   " " + str(row[1][site]),
                                   _style="padding-right: 10px;"))
            tr.append(td)
            tbody.append(tr)
        card_content_table.append(tbody)
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content_table,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        # Returns "FAILURE" if called before should_show() populated the data
        return self.final_data if self.final_data else "FAILURE"
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Compute the top-3 most active friends this week; show if non-empty."""
        final_data = self.get_from_cache()
        if final_data:
            pass
        else:
            # Redundant: datetime is already imported at module level
            import datetime
            db = current.db
            stable = db.submission
            friends, _ = utilities.get_friends(self.user_id)
            if len(friends):
                today = datetime.datetime.today()
                last_week = today - datetime.timedelta(days=7)
                # NOTE(review): raw SQL built with %-interpolation; the
                # interpolated values are a server-generated date and integer
                # ids from the following table, so no user-controlled input
                # reaches the query -- but a parameterized query would be safer.
                rows = db.executesql("""
                SELECT user_id, site, count(*)
                FROM submission
                WHERE time_stamp >= "%s" AND
                      user_id in (%s) AND custom_user_id is NULL
                GROUP BY 1, 2
                ORDER BY 3 DESC
                """ % (str(last_week.date()),
                       ",".join([str(x) for x in friends])))
                # Aggregate per-friend totals across sites
                final_hash = {}
                for row in rows:
                    if row[0] not in final_hash:
                        final_hash[row[0]] = {"total": 0}
                    final_hash[row[0]][row[1]] = int(row[2])
                    final_hash[row[0]]["total"] += int(row[2])
                # Top three friends by total submissions
                final_data = sorted(final_hash.items(),
                                    key=lambda x: x[1]["total"],
                                    reverse=True)[:3]
            else:
                final_data = []
            self.set_to_cache(final_data)
        if len(final_data) > 0:
            self.final_data = final_data
            return True
        return False
# ==============================================================================
class AddMoreFriendsCard(BaseCard):
    """Card shown to users following three or fewer people, suggesting a
    search for others from the same institute.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.card_title = "Add more friends"
        self.cache_key = CARD_CACHE_REDIS_KEYS["add_more_friends_prefix"] + str(self.user_id)
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the card; the CTA searches for users of the same institute."""
        data = self.get_data()
        self.ctas = [
            dict(btn_text="Show me",
                 btn_url=URL("default", "search",
                             vars={"institute": data["institute"]}),
                 btn_class="add-more-friends-card-institute-search")
        ]
        card_content = P("You have ",
                         B(current.T("%s %%{friend}") % data["friend_count"]),
                         " on StopStalk. For a better competitive programming learning experience, we recommend you to add more friends.")
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        """Return {institute, friend_count}, cached for an hour.

        On a cache miss this reads self.friend_count, which is set by
        should_show() -- should_show() must run before get_data().
        """
        cache_value = self.get_from_cache()
        if cache_value:
            return cache_value
        record = utilities.get_user_records([self.user_id], "id", "id", True)
        result = dict(institute=record["institute"],
                      friend_count=self.friend_count)
        self.set_to_cache(result)
        return result
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Show when the user follows at most three people (and has an institute)."""
        db = current.db
        user_record = utilities.get_user_records([self.user_id], "id", "id", True)
        if user_record.institute == "Other":
            # Don't show the card if the user's institute is Other
            return False
        self.friend_count = db(db.following.follower_id == self.user_id).count()
        return self.friend_count <= 3
# ==============================================================================
class JobProfileCard(BaseCard):
    """Card nudging users without stored job preferences to add them."""
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.card_title = "Looking for job!"
        self.cache_key = CARD_CACHE_REDIS_KEYS["job_profile_prefix"] + str(self.user_id)
        self.ctas = [
            dict(btn_text="Update job preferences",
                 btn_url=URL("default", "job_profile"),
                 btn_class="job-profile-card-update-preferences")
        ]
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the card; the copy is static, so no data lookup is needed."""
        pitch = P("I am looking for a job and I want StopStalk to reach out to me for matching opportunities. Let me update my preferences.")
        render_args = dict(card_title=self.card_title,
                           card_content=pitch,
                           cta_links=self.get_cta_html(),
                           card_color_class="white",
                           card_text_color_class="black-text")
        return BaseCard.get_html(self, **render_args)
    # --------------------------------------------------------------------------
    def get_data(self):
        """This card carries no per-user payload."""
        pass
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Show only while the user has no resume_data record."""
        db = current.db
        existing = db(db.resume_data.user_id == self.user_id).select().first()
        return existing is None
# ==============================================================================
class LinkedAccountsCard(BaseCard):
    """Card shown to users with fewer than three linked competitive
    programming site handles, prompting them to link more.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.card_title = "Link more accounts"
        self.cache_key = CARD_CACHE_REDIS_KEYS["more_accounts_prefix"] + str(self.user_id)
        self.ctas = [
            dict(btn_text="Update now",
                 btn_url=URL("user", "update_details"),
                 btn_class="linked-accounts-card-update-now")
        ]
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the card with the user's current linked-handle count."""
        count = self.get_data()
        card_content = P("You have ",
                         B(current.T("%s %%{account}") % count),
                         " linked with StopStalk. Update your profile with more handles to track your competitive programming progress.")
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        # Relies on should_show() having populated self.handle_count
        return self.handle_count
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Count linked site handles (cached); show when fewer than three.

        Removed an unused local (`db = current.db`) that was never read.
        """
        cache_value = self.get_from_cache()
        if cache_value:
            self.handle_count = cache_value
        else:
            handle_count = 0
            record = utilities.get_user_records([self.user_id], "id", "id", True)
            for site in current.SITES:
                if record[site.lower() + "_handle"]:
                    handle_count += 1
            self.handle_count = handle_count
            self.set_to_cache(self.handle_count)
        return self.handle_count < 3
# ==============================================================================
class LastSolvedProblemCard(BaseCard):
    """Card asking the user to contribute an editorial / tags / difficulty
    for a problem they recently solved.

    should_show() picks (and caches) the problem; get_html()/get_data()
    read it, so should_show() must be called first.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.final_pid = None
        self.card_title = "Giving back to the community!"
        self.cache_key = CARD_CACHE_REDIS_KEYS["last_solved_problem_prefix"] + \
                         str(self.user_id)
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the card with editorial/tag/difficulty CTAs for the problem."""
        problem_details = self.get_data()
        self.ctas = [
            dict(btn_text="Write Editorial",
                 btn_url=URL("problems", "editorials",
                             args=problem_details["id"],
                             vars=dict(write_editorial=True)),
                 btn_class="last-solved-problem-write-editorial"),
            dict(btn_text="Suggest tags",
                 btn_url=URL("problems", "index",
                             vars=dict(problem_id=problem_details["id"],
                                       suggest_tag=True)),
                 btn_class="last-solved-problem-suggest-tags"),
            dict(btn_text="Suggest difficulty",
                 btn_url=URL("problems", "index",
                             vars=dict(problem_id=problem_details["id"],
                                       suggest_difficulty=True)),
                 btn_class="last-solved-problem-suggest-difficulty")
        ]
        card_content = SPAN("You just solved ",
                            utilities.problem_widget(problem_details["name"],
                                                     problem_details["link"],
                                                     "solved-problem",
                                                     "Solved Problem",
                                                     problem_details["id"]),
                            ". You can write editorials on StopStalk and help the community.")
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        # Relies on should_show() having populated self.problem_details
        return self.problem_details
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Pick a recently-solved problem (cached); show when one exists."""
        cache_value = self.get_from_cache()
        if cache_value:
            # This means one problem is already there which is supposed to be shown
            self.problem_details = cache_value
            self.final_pid = self.problem_details["id"]
            return True
        # datetime is already imported at module level; only choice is local
        from random import choice
        db = current.db
        stable = db.submission
        query = (stable.user_id == self.user_id) & \
                (stable.time_stamp >= datetime.datetime.today() - \
                                      datetime.timedelta(days=current.PAST_DAYS)) & \
                (stable.status == "AC")
        pids = db(query).select(stable.problem_id,
                                distinct=True,
                                orderby=~stable.time_stamp,
                                limitby=(0, 10)).as_list()
        try:
            self.final_pid = choice(pids)["problem_id"]
        except IndexError:
            # No accepted submissions in the window (choice([]) raises);
            # was a bare except, narrowed to the exception actually expected
            self.final_pid = None
        if self.final_pid is None:
            return False
        pdetails = utilities.get_problem_details(self.final_pid)
        self.problem_details = {
            "name": pdetails["name"],
            "link": pdetails["link"],
            "id": self.final_pid
        }
        self.set_to_cache(self.problem_details)
        # BUG FIX: previously this path fell through and implicitly returned
        # None, so the card was never shown on a cache miss even after a
        # problem had been computed and cached.
        return True
# ==============================================================================
class TrendingProblemsCard(BaseCard):
    """Card showing the top two globally trending problems.

    Uses the "str" cache serializer, so cached payloads round-trip through
    str()/eval() rather than JSON.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.final_pid = None
        self.card_title = "Trending problems"
        self.cache_key = CARD_CACHE_REDIS_KEYS["trending_problems"]
        self.ctas = [
            dict(btn_text="View all",
                 btn_url=URL("problems", "trending"),
                 btn_class="trending-problems-card-view-all")
        ]
        BaseCard.__init__(self, user_id)
        # Must be set after BaseCard.__init__ (which defaults it to "json")
        self.cache_serializer = "str"
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the trending table for the problems picked by should_show()."""
        trending_problems = self.get_data()
        from trending_utilities import draw_trending_table
        trending_table = draw_trending_table(trending_problems,
                                             None,
                                             self.user_id)
        card_content = trending_table
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        # Relies on should_show() having populated self.trending_problems
        return self.trending_problems
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Load the top-2 trending problems (cached); hide if none available."""
        cache_value = self.get_from_cache()
        if cache_value:
            self.trending_problems = cache_value
            return True
        trending_problems = current.REDIS_CLIENT.get(GLOBALLY_TRENDING_PROBLEMS_CACHE_KEY)
        if trending_problems is None:
            return False
        # NOTE(review): eval() of a Redis payload; the key is written by a
        # trusted background job, but json would be a safer serializer.
        self.trending_problems = eval(trending_problems)[:2]
        self.set_to_cache(self.trending_problems)
        return True
# ==============================================================================
class SearchByTagCard(BaseCard):
    """Card with an inline form for searching problems by tag."""
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.final_pid = None
        self.card_title = "Pick a tag"
        self.cache_key = CARD_CACHE_REDIS_KEYS["search_by_tag"]
        self.ctas = []
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render a small GET form that submits to problems/search."""
        tag_field = INPUT(_type="text", _name="q",
                          _placeholder="Type some tag...",
                          _autocomplete="off")
        submit_btn = INPUT(_type="submit", _value="Search",
                           _class="btn btn-default")
        search_form = FORM(tag_field,
                           submit_btn,
                           _action=URL("problems", "search"),
                           _method="GET",
                           _class="col offset-s1 s10 search-by-tag-card-submit")
        render_args = dict(card_title=self.card_title,
                           card_content=DIV(search_form, _class="row"),
                           cta_links=self.get_cta_html(),
                           card_color_class="white",
                           card_text_color_class="black-text")
        return BaseCard.get_html(self, **render_args)
    # --------------------------------------------------------------------------
    def get_data(self):
        """Nothing to fetch for this card."""
        return
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Always eligible."""
        return True
# ==============================================================================
class AtCoderHandleCard(BaseCard):
    """Card prompting users who have no AtCoder handle to link one.

    Automatically stops showing after 2020-10-15 via the sunset mechanism
    in BaseCard.enabled_check.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        self.final_pid = None
        self.card_title = "Link AtCoder now!"
        self.ctas = [
            dict(btn_url=URL("user",
                             "update_details"),
                 btn_text="Update Now",
                 btn_class="atcoder-handle-card-update-now")
        ]
        BaseCard.__init__(self, user_id)
        # Equivalent to strptime("2020-10-15 00:00:00", "%Y-%m-%d %H:%M:%S")
        self.sunset_card_date = datetime.datetime(2020, 10, 15)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render the static announcement copy with the update CTA."""
        pitch = P("AtCoder has come up in our surveys multiple times and here we are. We now support AtCoder profiles :)")
        render_args = dict(card_title=self.card_title,
                           card_content=pitch,
                           cta_links=self.get_cta_html(),
                           card_color_class="white",
                           card_text_color_class="black-text")
        return BaseCard.get_html(self, **render_args)
    # --------------------------------------------------------------------------
    def get_data(self):
        """Nothing to fetch for this card."""
        return
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Show while the user's AtCoder handle is empty."""
        record = utilities.get_user_records([self.user_id], "id", "id", True)
        return record["atcoder_handle"] == ""
# ==============================================================================
class RecommendationsPageCard(BaseCard):
    """Card pointing users with submissions at the recommendations page.

    Shows either a "generate new recommendations" prompt (when the last
    batch is over a week old) or an introductory prompt (when none exist).
    get_html() branches on self.stale_recommendations, which is set by
    should_show() -- should_show() must be called first.
    """
    # --------------------------------------------------------------------------
    def __init__(self, user_id):
        self.user_id = user_id
        BaseCard.__init__(self, user_id)
    # --------------------------------------------------------------------------
    def get_html(self):
        """Render one of two variants depending on recommendation staleness."""
        if self.stale_recommendations:
            self.card_title = "New problems to solve!"
            card_content = TAG[""](SPAN("It has been more than a week since you last generated problem recommendations. Generate new ones and keep getting better by solving them."))
            self.ctas = [
                dict(btn_url=URL("problems",
                                 "recommendations"),
                     btn_text="Generate recommendations",
                     btn_class="recommendations-card-generate-recommendations")
            ]
        else:
            self.card_title = "StopStalk can recommend now!"
            card_content = TAG[""](SPAN("StopStalk will recommend you problems based on your past submissions."),
                                   " ",
                                   SPAN("Click on the"),
                                   " ",
                                   B("'Find me problems'"),
                                   " ",
                                   SPAN("button on the top and keep increasing your level gradually!"))
            self.ctas = [
                dict(btn_url=URL("problems",
                                 "recommendations"),
                     btn_text="Find me problems",
                     btn_class="recommendations-card-find-me-problems")
            ]
        card_html = BaseCard.get_html(self, **dict(
                        card_title=self.card_title,
                        card_content=card_content,
                        cta_links=self.get_cta_html(),
                        card_color_class="white",
                        card_text_color_class="black-text"
                    ))
        return card_html
    # --------------------------------------------------------------------------
    def get_data(self):
        # Nothing to compute for this card
        return
    # --------------------------------------------------------------------------
    @BaseCard.enabled_check
    def should_show(self):
        """Show when the user has submissions and recommendations are absent
        or stale (older than a week).
        """
        db = current.db
        stable = db.submission
        prtable = db.problem_recommendations
        submission_count = db(stable.user_id == self.user_id).count()
        # NOTE(review): select().last() fetches all rows to take the last;
        # generated_at is presumably a date (compared against .date()) -- confirm.
        last_recommended = db(prtable.user_id == self.user_id).select().last()
        self.stale_recommendations = (last_recommended is not None and \
                                      (datetime.datetime.now().date() - \
                                       last_recommended.generated_at).days > 7)
        return submission_count > 0 and \
               (last_recommended is None or self.stale_recommendations)
# ============================================================================== | mit |
log2timeline/plaso | tests/cli/helpers/process_resources.py | 2 | 2513 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the process resources CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import process_resources
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class ProcessResourcesArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
  """Tests for the process resources CLI arguments helper."""
  # pylint: disable=no-member,protected-access
  # Expected argparse help output; {0:s} is filled with the options-section
  # heading (which differs between argparse versions).
  _EXPECTED_OUTPUT = """\
usage: cli_helper.py [--process_memory_limit SIZE]
Test argument parser.
{0:s}:
  --process_memory_limit SIZE, --process-memory-limit SIZE
                        Maximum amount of memory (data segment) a process is
                        allowed to allocate in bytes, where 0 represents no
                        limit. The default limit is 4294967296 (4 GiB). This
                        applies to both the main (foreman) process and the
                        worker processes. This limit is enforced by the
                        operating system and will supersede the worker memory
                        limit (--worker_memory_limit).
""".format(cli_test_lib.ARGPARSE_OPTIONS)
  def testAddArguments(self):
    """Tests the AddArguments function."""
    argument_parser = argparse.ArgumentParser(
        prog='cli_helper.py', description='Test argument parser.',
        add_help=False,
        formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
    process_resources.ProcessResourcesArgumentsHelper.AddArguments(
        argument_parser)
    # The generated help text must match the expected output exactly.
    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_OUTPUT)
  def testParseOptions(self):
    """Tests the ParseOptions function."""
    options = cli_test_lib.TestOptions()
    test_tool = tools.CLITool()
    # Default options parse without error.
    process_resources.ProcessResourcesArgumentsHelper.ParseOptions(
        options, test_tool)
    # A non-CLITool configuration object is rejected.
    with self.assertRaises(errors.BadConfigObject):
      process_resources.ProcessResourcesArgumentsHelper.ParseOptions(
          options, None)
    # A non-numeric memory limit is rejected.
    with self.assertRaises(errors.BadConfigOption):
      options.process_memory_limit = 'bogus'
      process_resources.ProcessResourcesArgumentsHelper.ParseOptions(
          options, test_tool)
    # A negative memory limit is rejected.
    with self.assertRaises(errors.BadConfigOption):
      options.process_memory_limit = -1
      process_resources.ProcessResourcesArgumentsHelper.ParseOptions(
          options, test_tool)
# Allow running this test module directly with the stdlib unittest runner.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/error.py | 1 | 1473 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Error(Model):
    """Error response payload returned by the Azure Network service.

    :param code:
    :type code: str
    :param message:
    :type message: str
    :param target:
    :type target: str
    :param details:
    :type details: list[~azure.mgmt.network.v2017_09_01.models.ErrorDetails]
    :param inner_error:
    :type inner_error: str
    """

    # Maps Python attribute names to their wire-format keys and types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetails]'},
        'inner_error': {'key': 'innerError', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Error, self).__init__(**kwargs)
        # Each attribute defaults to None when absent from kwargs.
        for attribute_name in ('code', 'message', 'target', 'details', 'inner_error'):
            setattr(self, attribute_name, kwargs.get(attribute_name, None))
| mit |
SebasSBM/django | tests/auth_tests/test_decorators.py | 279 | 4124 | from django.conf import settings
from django.contrib.auth import models
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from .test_views import AuthViewsTestCase
@override_settings(ROOT_URLCONF='auth_tests.urls')
class LoginRequiredTestCase(AuthViewsTestCase):
    """
    Tests the login_required decorators
    """
    def testCallable(self):
        """
        Check that login_required is assignable to callable objects.
        """
        class AnyCallable(object):
            def __call__(self, *args, **kwargs):
                pass
        login_required(AnyCallable())

    def testView(self):
        """
        Check that login_required is assignable to normal views.
        """
        def plain_view(request):
            pass
        login_required(plain_view)

    def testLoginRequired(self, view_url='/login_required/', login_url=None):
        """
        A login_required-wrapped view redirects anonymous users to the login
        page and serves authenticated users normally.
        """
        if login_url is None:
            login_url = settings.LOGIN_URL
        # Anonymous access: expect a redirect towards the login URL.
        anonymous_response = self.client.get(view_url)
        self.assertEqual(anonymous_response.status_code, 302)
        self.assertIn(login_url, anonymous_response.url)
        # Authenticated access: the view is served directly.
        self.login()
        authenticated_response = self.client.get(view_url)
        self.assertEqual(authenticated_response.status_code, 200)

    def testLoginRequiredNextUrl(self):
        """
        login_required honours a view-specific login_url argument.
        """
        self.testLoginRequired(view_url='/login_required_login_url/',
                               login_url='/somewhere/')
class PermissionsRequiredDecoratorTest(TestCase):
    """
    Tests for the permission_required decorator
    """
    def setUp(self):
        self.user = models.User.objects.create(username='joe', password='qwerty')
        self.factory = RequestFactory()
        # Add permissions auth.add_customuser and auth.change_customuser
        granted = models.Permission.objects.filter(
            codename__in=('add_customuser', 'change_customuser'))
        self.user.user_permissions.add(*granted)

    def _request_as_user(self):
        # Build a GET request authenticated as the permissioned test user.
        request = self.factory.get('/rand')
        request.user = self.user
        return request

    def test_many_permissions_pass(self):
        @permission_required(['auth.add_customuser', 'auth.change_customuser'])
        def a_view(request):
            return HttpResponse()
        response = a_view(self._request_as_user())
        self.assertEqual(response.status_code, 200)

    def test_many_permissions_in_set_pass(self):
        @permission_required({'auth.add_customuser', 'auth.change_customuser'})
        def a_view(request):
            return HttpResponse()
        response = a_view(self._request_as_user())
        self.assertEqual(response.status_code, 200)

    def test_single_permission_pass(self):
        @permission_required('auth.add_customuser')
        def a_view(request):
            return HttpResponse()
        response = a_view(self._request_as_user())
        self.assertEqual(response.status_code, 200)

    def test_permissioned_denied_redirect(self):
        # Missing one permission: default behaviour is a redirect.
        @permission_required(['auth.add_customuser', 'auth.change_customuser', 'non-existent-permission'])
        def a_view(request):
            return HttpResponse()
        response = a_view(self._request_as_user())
        self.assertEqual(response.status_code, 302)

    def test_permissioned_denied_exception_raised(self):
        # Missing one permission with raise_exception: PermissionDenied bubbles up.
        @permission_required([
            'auth.add_customuser', 'auth.change_customuser', 'non-existent-permission'
        ], raise_exception=True)
        def a_view(request):
            return HttpResponse()
        with self.assertRaises(PermissionDenied):
            a_view(self._request_as_user())
| bsd-3-clause |
abircse06/youtube-dl | youtube_dl/extractor/sohu.py | 92 | 7209 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_request,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
)
class SohuIE(InfoExtractor):
    """Extractor for Sohu TV watch pages (tv.sohu.com and my.tv.sohu.com)."""

    # The conditional group (?(mytv)|n) requires a leading "n" before the
    # numeric id only for the non-"my." host variant of the URL.
    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'

    _TESTS = [{
        'note': 'This video is available only in Mainland China',
        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
        'md5': '29175c8cadd8b5cc4055001e85d6b372',
        'info_dict': {
            'id': '382479172',
            'ext': 'mp4',
            'title': 'MV:Far East Movement《The Illest》',
        },
        'skip': 'On available in China',
    }, {
        'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
        'md5': '699060e75cf58858dd47fb9c03c42cfb',
        'info_dict': {
            'id': '409385080',
            'ext': 'mp4',
            'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
        }
    }, {
        'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
        'md5': '9bf34be48f2f4dadcb226c74127e203c',
        'info_dict': {
            'id': '78693464',
            'ext': 'mp4',
            'title': '【爱范品】第31期:MWC见不到的奇葩手机',
        }
    }, {
        'note': 'Multipart video',
        'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
        'info_dict': {
            'id': '78910339',
            'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
        },
        'playlist': [{
            'md5': 'bdbfb8f39924725e6589c146bc1883ad',
            'info_dict': {
                'id': '78910339_part1',
                'ext': 'mp4',
                'duration': 294,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }, {
            'md5': '3e1f46aaeb95354fd10e7fca9fc1804e',
            'info_dict': {
                'id': '78910339_part2',
                'ext': 'mp4',
                'duration': 300,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }, {
            'md5': '8407e634175fdac706766481b9443450',
            'info_dict': {
                'id': '78910339_part3',
                'ext': 'mp4',
                'duration': 150,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }]
    }, {
        'note': 'Video with title containing dash',
        'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
        'info_dict': {
            'id': '78932792',
            'ext': 'mp4',
            'title': 'youtube-dl testing video',
        },
        'params': {
            'skip_download': True
        }
    }]

    def _real_extract(self, url):

        def _fetch_data(vid_id, mytv=False):
            # The "my." site and the main site use different metadata APIs.
            if mytv:
                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
            else:
                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
            req = compat_urllib_request.Request(base_data_url + vid_id)
            # Optional proxy used to bypass geo verification for CN-only videos.
            cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
            if cn_verification_proxy:
                req.add_header('Ytdl-request-proxy', cn_verification_proxy)
            return self._download_json(
                req, video_id,
                'Downloading JSON data for %s' % vid_id)

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        mytv = mobj.group('mytv') is not None

        webpage = self._download_webpage(url, video_id)

        # Strip the trailing " - 搜狐视频" site suffix from the page title.
        title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage))

        vid = self._html_search_regex(
            r'var vid ?= ?["\'](\d+)["\']',
            webpage, 'video path')
        vid_data = _fetch_data(vid, mytv)
        if vid_data['play'] != 1:
            # assumes status == 12 marks a broken/unplayable video -- TODO confirm
            if vid_data.get('status') == 12:
                raise ExtractorError(
                    'Sohu said: There\'s something wrong in the video.',
                    expected=True)
            else:
                raise ExtractorError(
                    'Sohu said: The video is only licensed to users in Mainland China.',
                    expected=True)

        # Collect per-quality metadata; the already-fetched vid_data is reused
        # when a quality maps back to the primary vid.
        formats_json = {}
        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
            vid_id = vid_data['data'].get('%sVid' % format_id)
            if not vid_id:
                continue
            vid_id = compat_str(vid_id)
            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)

        part_count = vid_data['data']['totalBlocks']

        # Build one playlist entry per part, with one format per quality.
        playlist = []
        for i in range(part_count):
            formats = []
            for format_id, format_data in formats_json.items():
                allot = format_data['allot']
                data = format_data['data']
                clips_url = data['clipsURL']
                su = data['su']

                # The CDN sometimes answers with the placeholder host below;
                # keep re-requesting (passing back the 'nid' CDN hint) until a
                # real URL is returned, up to 5 retries.
                video_url = 'newflv.sohu.ccgslb.net'
                cdnId = None
                retries = 0
                while 'newflv.sohu.ccgslb.net' in video_url:
                    params = {
                        'prot': 9,
                        'file': clips_url[i],
                        'new': su[i],
                        'prod': 'flash',
                    }
                    if cdnId is not None:
                        params['idc'] = cdnId
                    download_note = 'Downloading %s video URL part %d of %d' % (
                        format_id, i + 1, part_count)
                    if retries > 0:
                        download_note += ' (retry #%d)' % retries
                    part_info = self._parse_json(self._download_webpage(
                        'http://%s/?%s' % (allot, compat_urllib_parse.urlencode(params)),
                        video_id, download_note), video_id)
                    video_url = part_info['url']
                    cdnId = part_info.get('nid')
                    retries += 1
                    if retries > 5:
                        raise ExtractorError('Failed to get video URL')

                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'filesize': data['clipsBytes'][i],
                    'width': data['width'],
                    'height': data['height'],
                    'fps': data['fps'],
                })
            self._sort_formats(formats)

            playlist.append({
                'id': '%s_part%d' % (video_id, i + 1),
                'title': title,
                'duration': vid_data['data']['clipsDuration'][i],
                'formats': formats,
            })

        # Single-part videos are returned directly; multipart videos become a
        # multi_video container of the per-part entries.
        if len(playlist) == 1:
            info = playlist[0]
            info['id'] = video_id
        else:
            info = {
                '_type': 'multi_video',
                'entries': playlist,
                'id': video_id,
                'title': title,
            }

        return info
| unlicense |
dls-controls/pymalcolm | tests/test_modules/test_builtin/test_choicepart.py | 1 | 1045 | import unittest
from malcolm.core import Controller, Process
from malcolm.modules.builtin.parts import ChoicePart
class TestChoicePart(unittest.TestCase):
def setUp(self):
self.o = ChoicePart(
name="cp", description="desc", choices=["a", "b"], value="a", writeable=True
)
self.c = Controller("mri")
self.c.add_part(self.o)
self.c.setup(Process("proc"))
def test_init(self):
assert self.o.name == "cp"
assert self.o.attr.value == "a"
assert self.o.attr.meta.description == "desc"
assert self.o.attr.meta.choices == ["a", "b"]
assert self.o.attr.meta.tags == ["widget:combo", "config:1"]
assert self.c.field_registry.fields[self.o] == [
("cp", self.o.attr, self.o.attr.set_value, False)
]
def test_setter(self):
assert self.o.attr.value == "a"
self.o.attr.set_value("b")
assert self.o.attr.value == "b"
with self.assertRaises(ValueError):
self.o.attr.set_value("c")
| apache-2.0 |
ruci00/redshift-gui | src/app.py | 1 | 9694 | from gi.repository import Gdk, Gio, GLib, Gtk
import sys
import re
from redshift import RedshiftHelper
from threading import Thread, Event
class RedshiftApp(Gtk.Application):
    """Main GTK application window controlling a RedshiftHelper instance.

    Widgets are loaded from ``ui.glade``; the ``on_*`` methods below are
    wired up by ``Gtk.Builder.connect_signals``, so their names must match
    the handler names declared in the Glade file and must not be renamed.
    """

    # Shared flag used to tell the background UpdateThread to terminate.
    update_stopflag = Event()

    def __init__(self, redshifthelper):
        Gtk.Application.__init__(self)
        GLib.set_prgname("Redshift Settings")
        self.helper = redshifthelper  # type: RedshiftHelper
        self.statusicon = Gtk.StatusIcon()
        self.builder = Gtk.Builder()
        self.builder.add_from_file('ui.glade')
        self.builder.connect_signals(self)
        self.window = self.builder.get_object('window')  # type: Gtk.Window
        # Apply the application stylesheet to the whole screen with user
        # priority so it overrides the theme defaults.
        styleprovider = Gtk.CssProvider()
        styleprovider.load_from_path('ui.css')
        Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(),
                                                 styleprovider,
                                                 Gtk.STYLE_PROVIDER_PRIORITY_USER)

    def build_headerbar(self):
        """Create the header bar: power toggle, location button, app menu."""
        headerbar = Gtk.HeaderBar()
        headerbar.set_title(self.window.get_title())
        headerbar.set_show_close_button(True)

        powerimage = Gtk.Image()
        powerimage.set_from_icon_name('system-shutdown-symbolic', Gtk.IconSize.BUTTON)
        enabled_bt = Gtk.ToggleButton()
        enabled_bt.set_image(powerimage)
        enabled_bt.set_tooltip_text('Enable/disable Redshift')
        enabled_bt.connect('toggled', self.on_enabledbt_toggled)
        # TODO: add toggling event handler
        headerbar.pack_start(enabled_bt)

        hbox = Gtk.HBox(spacing=5)
        location_bt = Gtk.Button.new_from_icon_name('find-location-symbolic', Gtk.IconSize.BUTTON)
        """@type: Gtk.Button"""
        location_bt.connect('clicked', self.on_locationbt_clicked)

        menu_bt = Gtk.MenuButton()
        menuimage = Gtk.Image()
        menuimage.set_from_icon_name('open-menu-symbolic', Gtk.IconSize.BUTTON)
        menu_bt.set_image(menuimage)
        popovermenu = Gio.Menu()
        popovermenu.append("About", "app.about")  # triggers the app.about action
        popover = Gtk.Popover().new_from_model(menu_bt, popovermenu)
        menu_bt.set_popover(popover)

        hbox.add(location_bt)
        hbox.add(menu_bt)
        headerbar.pack_end(hbox)
        self.window.set_titlebar(headerbar)

    def do_activate(self):
        """Show the main window and start the background label updater."""
        self.window.set_application(self)
        self.build_headerbar()
        UpdateThread(self.update_stopflag, self.builder, self.helper, ).start()
        self.window.show_all()

    def do_startup(self):
        """Register application-level actions (currently only app.about)."""
        Gtk.Application.do_startup(self)
        aboutaction = Gio.SimpleAction.new('about', None)
        aboutaction.connect('activate', self.on_about)
        self.add_action(aboutaction)

    def on_window_delete_event(self, *args):
        """Stop redshift and the updater thread when the window closes."""
        self.helper.stop()
        self.update_stopflag.set()
        Gtk.main_quit()

    def on_enabledbt_toggled(self, button: Gtk.ToggleButton):
        """Start or stop redshift to mirror the power toggle state."""
        if button.get_active():
            self.helper.start()
        else:
            self.helper.stop()

    def on_locationbt_clicked(self, button):
        """Open the location dialog pre-filled with the current coordinates."""
        dialog = LocationDialog(self.window)
        dialog.latentry.set_text(str(self.helper.location[0]))
        dialog.lonentry.set_text(str(self.helper.location[1]))
        dialog.connect('response', self.on_locationdialog_response)
        dialog.run()

    def on_locationdialog_response(self, dialog, response):
        """Validate and store the coordinates entered in the location dialog.

        On OK with an invalid entry the dialog is intentionally left open so
        the user can correct the value; on Cancel it is simply destroyed.
        """
        def get_entry_value(entry):
            """Return the entry text as float, or None after showing an error."""
            if not re.match(r'^[-+]?\d*\.?\d+$', entry.get_text()):
                msg_dialog = Gtk.MessageDialog(self.window, type=Gtk.MessageType.ERROR, buttons=Gtk.ButtonsType.OK)
                if entry.get_text():
                    msg_dialog.set_markup("Invalid value: " + entry.get_text())
                else:
                    msg_dialog.set_markup("Fields cannot be empty!")
                msg_dialog.set_title("Error")
                msg_dialog.run()
                msg_dialog.destroy()
                return None  # explicit: validation failed
            else:
                return float(entry.get_text())

        if response == Gtk.ResponseType.OK:
            lat = get_entry_value(dialog.latentry)
            lon = None
            # Bug fix: compare against None explicitly. 0.0 (the equator /
            # prime meridian) is a valid coordinate but is falsy, so the old
            # truthiness test wrongly rejected it.
            if lat is not None:
                lon = get_entry_value(dialog.lonentry)
            if lon is not None:
                self.helper.location = (lat, lon)
                dialog.destroy()
        else:
            dialog.destroy()

    def on_autotempradio_toggled(self, button: Gtk.RadioButton):
        """Switch to day/night temperature mode and push both values."""
        active = button.get_active()
        self.builder.get_object('autotempgrid').set_sensitive(active)
        if active:
            dayadj = self.builder.get_object('daytempadj')
            nightadj = self.builder.get_object('nighttempadj')
            self.helper.temperature = (dayadj.get_value(), nightadj.get_value())

    def on_fixedtempradio_toggled(self, button):
        """Switch to a single fixed color temperature."""
        active = button.get_active()
        self.builder.get_object('fixedtempscale').set_sensitive(active)
        if active:
            adj = self.builder.get_object('fixedtempadj')
            self.helper.temperature = adj.get_value()

    def on_autobrightradio_toggled(self, button):
        """Switch to day/night brightness mode (UI percent -> 0..1 fraction)."""
        active = button.get_active()
        self.builder.get_object('autobrightgrid').set_sensitive(active)
        if active:
            dayadj = self.builder.get_object('daybrightadj')
            nightadj = self.builder.get_object('nightbrightadj')
            self.helper.brightness = (dayadj.get_value() / 100, nightadj.get_value() / 100)

    def on_fixedbrightradio_toggled(self, button):
        """Switch to a single fixed brightness (UI percent -> 0..1 fraction)."""
        active = button.get_active()
        self.builder.get_object('fixedbrightscale').set_sensitive(active)
        if active:
            adj = self.builder.get_object('fixedbrightadj')
            self.helper.brightness = adj.get_value() / 100

    def on_daytempadj_value_changed(self, adjustment):
        """Propagate a day-temperature slider change, keeping the night value."""
        self.helper.temperature = (adjustment.get_value(), self.helper.temperature[1])

    def on_nighttempadj_value_changed(self, adjustment):
        """Propagate a night-temperature slider change, keeping the day value."""
        self.helper.temperature = (self.helper.temperature[0], adjustment.get_value())

    def on_fixedtempadj_value_changed(self, adjustment):
        """Propagate a fixed-temperature slider change."""
        self.helper.temperature = adjustment.get_value()

    def on_daybrightadj_value_changed(self, adjustment):
        """Propagate a day-brightness slider change (percent -> fraction)."""
        self.helper.brightness = (adjustment.get_value() / 100, self.helper.brightness[1])

    def on_nightbrightadj_value_changed(self, adjustment):
        """Propagate a night-brightness slider change (percent -> fraction)."""
        self.helper.brightness = (self.helper.brightness[0], adjustment.get_value() / 100)

    def on_fixedbrightadj_value_changed(self, adjustment):
        """Propagate a fixed-brightness slider change (percent -> fraction)."""
        self.helper.brightness = adjustment.get_value() / 100

    @staticmethod
    def on_tempscales_format_value(scale, value):
        """Render temperature slider values as e.g. '6500K'."""
        return str(int(value)) + "K"

    @staticmethod
    def on_brightscales_format_value(scale, value):
        """Render brightness slider values as e.g. '80%'."""
        return str(int(value)) + "%"

    def on_about(self, *args):
        """Show the modal About dialog."""
        about = AboutDialog(self.helper, self.window)
        about.run()
        about.destroy()
class UpdateThread(Thread):
    """Background worker that refreshes the status labels every 0.5 seconds.

    Polls RedshiftHelper.getinfo() and pushes the values into the period,
    color-temperature and brightness labels until the stop event is set.
    """

    def __init__(self, event, builder, redshifthelper, *args):
        Thread.__init__(self, *args)
        # threading.Event used as the shutdown flag (set by the main window).
        self.stopped = event
        self.builder = builder
        self.helper = redshifthelper
        # Labels updated on every tick, in display order:
        # (period, color temperature, brightness).
        self.labels = (
            self.builder.get_object('periodlabel'),
            self.builder.get_object('colortemplabel'),
            self.builder.get_object('brightnesslabel')
        )

    def run(self):
        # Refresh once immediately, then every 0.5 s until stopped is set;
        # Event.wait() doubles as both the sleep and the exit check.
        self.update()
        while not self.stopped.wait(0.5):
            self.update()

    def update(self):
        # getinfo() yields three strings; the third is a brightness fraction
        # that is rendered as a percentage below.
        info = self.helper.getinfo()
        # NOTE(review): Gdk.threads_enter/leave is the legacy GTK+ threading
        # API (deprecated since GTK+ 3.6); scheduling the widget updates via
        # GLib.idle_add would be the modern approach -- confirm before changing.
        Gdk.threads_enter()
        self.labels[0].set_markup(info[0])
        self.labels[1].set_markup('Color temperature: ' + info[1])
        self.labels[2].set_markup('Brightness: ' + str(int(float(info[2]) * 100)) + '%')
        Gdk.threads_leave()
class AboutDialog(Gtk.AboutDialog):
    """About window showing version, license, author and project links."""

    def __init__(self, helper, parent=None):
        Gtk.AboutDialog.__init__(self, parent=parent)
        self.set_destroy_with_parent(True)
        self.set_logo_icon_name('redshift')
        # Version string includes the detected redshift backend name.
        self.set_version('0.1\n' + helper.getname())
        self.set_license_type(Gtk.License.GPL_2_0)
        self.set_authors(["Adam Rutkowski <a_rutkowski@outlook.com>"])
        self.set_website('https://github.com/ruci00/redshift-gui')
        self.set_website_label('GitHub')
class LocationDialog(Gtk.Dialog):
    """Modal dialog with latitude/longitude entry fields."""

    def __init__(self, parent=None):
        Gtk.Dialog.__init__(self, title="Set location", parent=parent, use_header_bar=True)
        self.set_modal(True)

        grid = Gtk.Grid()
        grid.set_column_spacing(5)
        grid.set_row_spacing(5)
        grid.set_border_width(10)

        # Single row: label, entry, label, entry.
        self.latentry = Gtk.Entry()
        self.lonentry = Gtk.Entry()
        grid.attach(Gtk.Label("Latitude:"), 0, 0, 1, 1)
        grid.attach(self.latentry, 1, 0, 1, 1)
        grid.attach(Gtk.Label("Longitude:"), 2, 0, 1, 1)
        grid.attach(self.lonentry, 3, 0, 1, 1)

        self.add_buttons("Cancel", Gtk.ResponseType.CANCEL,
                         "Save", Gtk.ResponseType.OK)
        self.get_content_area().add(grid)
        self.show_all()
class NoRedshiftDialog(Gtk.MessageDialog):
    """Fatal error popup shown when the redshift executable is missing."""

    def __init__(self):
        Gtk.MessageDialog.__init__(
            self, type=Gtk.MessageType.ERROR, buttons=Gtk.ButtonsType.OK)
        self.set_markup("Cannot find <i>redshift</i>! Program will be terminated.")
        self.set_title("Redshift Settings")
if __name__ == '__main__':
    redshift = RedshiftHelper()
    # Bail out early with a modal error if the redshift binary is absent.
    if not redshift.isavailable():
        NoRedshiftDialog().run()
        sys.exit()
    app = RedshiftApp(redshift)
    exit_status = app.run(sys.argv)
    sys.exit(exit_status)
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/maps/azure-mgmt-maps/azure/mgmt/maps/_azure_maps_management_client.py | 1 | 4203 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import AzureMapsManagementClientConfiguration
from .operations import AccountsOperations
from .operations import MapsOperations
from .operations import CreatorsOperations
from . import models
class AzureMapsManagementClient(object):
"""Azure Maps.
:ivar accounts: AccountsOperations operations
:vartype accounts: azure.mgmt.maps.operations.AccountsOperations
:ivar maps: MapsOperations operations
:vartype maps: azure.mgmt.maps.operations.MapsOperations
:ivar creators: CreatorsOperations operations
:vartype creators: azure.mgmt.maps.operations.CreatorsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = AzureMapsManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.accounts = AccountsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.maps = MapsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.creators = CreatorsOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AzureMapsManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| mit |
TellinaTool/awesome_nmt | bashlint/data_tools.py | 1 | 15560 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Domain-specific natural Language and bash command tokenizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if sys.version_info > (3, 0):
from six.moves import xrange
from bashlint import bash, lint, nast
flag_suffix = '<FLAG_SUFFIX>'
def correct_errors_and_normalize_surface(cm):
    """Correct common surface errors in command *cm* (delegates to lint)."""
    return lint.correct_errors_and_normalize_surface(cm)
def get_utility_statistics(utility):
    """Return statistics for a bash *utility* name (delegates to lint)."""
    return lint.get_utility_statistics(utility)
def get_utilities(ast):
    """Collect the set of utility names appearing anywhere in *ast*.

    Returns an empty set for a null tree.  Argument nodes are treated as
    leaves: their subtrees are not searched.
    """
    if not ast:
        return set([])

    def collect(node):
        found = set()
        if node.is_utility():
            found.add(node.value)
        elif node.is_argument():
            # Arguments terminate the search along this branch.
            return found
        for child in node.children:
            found |= collect(child)
        return found

    return collect(ast)
def bash_tokenizer(cmd, recover_quotation=True, loose_constraints=False,
                   ignore_flag_order=False, arg_type_only=False, keep_common_args=False,
                   with_flag_head=False,
                   with_flag_argtype=False, with_prefix=False, verbose=False):
    """Tokenize a bash command string: parse it to an AST, then flatten."""
    ast = lint.normalize_ast(cmd, recover_quotation, verbose=verbose)
    return ast2tokens(
        ast,
        loose_constraints,
        ignore_flag_order,
        arg_type_only,
        keep_common_args=keep_common_args,
        with_flag_head=with_flag_head,
        with_prefix=with_prefix,
        with_flag_argtype=with_flag_argtype)
def bash_parser(cmd, recover_quotation=True, verbose=False):
    """
    Parse a bash command string into an AST (delegates to lint.normalize_ast).
    """
    return lint.normalize_ast(cmd, recover_quotation, verbose=verbose)
def ast2tokens(node, loose_constraints=False, ignore_flag_order=False,
               arg_type_only=False, keep_common_args=False,
               with_arg_type=False, with_flag_head=False,
               with_flag_argtype=False, with_prefix=False,
               indexing_args=False):
    """
    Convert a bash ast into a list of tokens.

    :param loose_constraints: If set, do not check semantic coherence between
        flags and arguments.
    :param ignore_flag_order: If set, output flags in alphabetical order.
    :param arg_type_only: If set, output argument semantic types instead of the
        actual value.
    :param: keep_common_args: If set, keep common arguments such as "/", "."
        and do not replace them with semantic types. Effective only when
        arg_type_only is set.
    :param with_arg_type: If set, append argument type to argument token.
    :param with_flag_head: If set, add utility prefix to flag token.
    :param with_flag_argtype: If set, append argument type suffix to flag token.
    :param with_prefix: If set, add node kind prefix to token.
    :param indexing_args: If set, append order index to argument token.
    """
    if not node:
        return []

    lc = loose_constraints

    def to_tokens_fun(node):
        # Recursively flatten one subtree into a token list, dispatching on
        # the node kind.  Under loose constraints (lc) malformed subtrees are
        # serialized best-effort instead of failing the assertions.
        tokens = []
        if node.is_root():
            assert(loose_constraints or node.get_num_of_children() == 1)
            if lc:
                for child in node.children:
                    tokens += to_tokens_fun(child)
            else:
                tokens = to_tokens_fun(node.children[0])
        elif node.kind == "pipeline":
            assert(loose_constraints or node.get_num_of_children() > 1)
            if lc and node.get_num_of_children() < 1:
                tokens.append("|")
            elif lc and node.get_num_of_children() == 1:
                # treat "singleton-pipe" as atomic command
                tokens += to_tokens_fun(node.children[0])
            else:
                # commands joined by "|" separators
                for child in node.children[:-1]:
                    tokens += to_tokens_fun(child)
                    tokens.append("|")
                tokens += to_tokens_fun(node.children[-1])
        elif node.kind == "commandsubstitution":
            assert(loose_constraints or node.get_num_of_children() == 1)
            if lc and node.get_num_of_children() < 1:
                tokens += ["$(", ")"]
            else:
                tokens.append("$(")
                tokens += to_tokens_fun(node.children[0])
                tokens.append(")")
        elif node.kind == "processsubstitution":
            # node.value is the direction character, "<" or ">" presumably
            assert(loose_constraints or node.get_num_of_children() == 1)
            if lc and node.get_num_of_children() < 1:
                tokens.append(node.value + "(")
                tokens.append(")")
            else:
                tokens.append(node.value + "(")
                tokens += to_tokens_fun(node.children[0])
                tokens.append(")")
        elif node.is_utility():
            # utility name followed by its flags/arguments, optionally in
            # alphabetical flag order
            token = node.value
            if with_prefix:
                token = node.prefix + token
            tokens.append(token)
            children = sorted(node.children, key=lambda x:x.value) \
                if ignore_flag_order else node.children
            for child in children:
                tokens += to_tokens_fun(child)
        elif node.is_option():
            assert(loose_constraints or node.parent)
            # find's -exec/-ok flags encode their terminator after "::"
            # (e.g. "-exec::;"); the terminator is emitted after the
            # flag's children below.
            if '::' in node.value and (node.value.startswith('-exec') or
                    node.value.startswith('-ok')):
                value, op = node.value.split('::')
                token = value
            else:
                token = node.value
            if with_flag_head:
                if node.parent:
                    token = node.utility.value + "@@" + token
                else:
                    token = token
            if with_prefix:
                token = node.prefix + token
            if with_flag_argtype:
                # suffix encodes the argument types this flag takes
                suffix = ''
                if node.children:
                    for child in node.children:
                        if child.is_argument():
                            suffix += child.arg_type
                        elif child.is_utility():
                            suffix += 'UTILITY'
                token = token + flag_suffix + suffix
            tokens.append(token)
            for child in node.children:
                tokens += to_tokens_fun(child)
            if '::' in node.value and (node.value.startswith('-exec') or
                    node.value.startswith('-ok')):
                # ";" must be escaped in serialized bash
                if op == ';':
                    op = "\\;"
                tokens.append(op)
        elif node.kind == 'operator':
            tokens.append(node.value)
        elif node.kind == "binarylogicop":
            # e.g. find's -and/-or; children only exist under loose constraints
            assert(loose_constraints or node.get_num_of_children() == 0)
            if lc and node.get_num_of_children() > 0:
                for child in node.children[:-1]:
                    tokens += to_tokens_fun(child)
                    tokens.append(node.value)
                tokens += to_tokens_fun(node.children[-1])
            else:
                tokens.append(node.value)
        elif node.kind == "unarylogicop":
            # operator placed before or after its operand depending on
            # associativity (e.g. "!" vs "-prune")
            assert(loose_constraints or node.get_num_of_children() == 0)
            if lc and node.get_num_of_children() > 0:
                if node.associate == nast.UnaryLogicOpNode.RIGHT:
                    tokens.append(node.value)
                    tokens += to_tokens_fun(node.children[0])
                else:
                    tokens += to_tokens_fun(node.children[0])
                    tokens.append(node.value)
            else:
                tokens.append(node.value)
        elif node.kind == "bracket":
            # find's escaped parentheses "\( ... \)"
            assert(loose_constraints or node.get_num_of_children() >= 1)
            if lc and node.get_num_of_children() < 2:
                for child in node.children:
                    tokens += to_tokens_fun(child)
            else:
                tokens.append("\\(")
                for i in xrange(len(node.children)-1):
                    tokens += to_tokens_fun(node.children[i])
                tokens += to_tokens_fun(node.children[-1])
                tokens.append("\\)")
        elif node.kind == "nt":
            # non-terminal grouping node (see paren_parser)
            assert(loose_constraints or node.get_num_of_children() > 0)
            tokens.append("(")
            for child in node.children:
                tokens += to_tokens_fun(child)
            tokens.append(")")
        elif node.is_argument() or node.kind in ["t"]:
            assert(loose_constraints or node.get_num_of_children() == 0)
            if arg_type_only and node.is_open_vocab():
                if (keep_common_args and node.parent.is_utility() and
                        node.parent.value == 'find' and node.value in bash.find_common_args):
                    # keep frequently-occurred arguments in the vocabulary
                    # TODO: define the criteria for "common args"
                    token = node.value
                else:
                    if node.arg_type in bash.quantity_argument_types:
                        # preserve the +/- sign of quantity arguments
                        if node.value.startswith('+'):
                            token = '+{}'.format(node.arg_type)
                        elif node.value.startswith('-'):
                            token = '-{}'.format(node.arg_type)
                        else:
                            token = node.arg_type
                    else:
                        token = node.arg_type
            else:
                token = node.value
            if with_prefix:
                token = node.prefix + token
            if with_arg_type:
                token = token + "_" + node.arg_type
            if indexing_args and node.to_index():
                token = token + "-{:02d}".format(node.index)
            tokens.append(token)
            if lc:
                for child in node.children:
                    tokens += to_tokens_fun(child)
        return tokens

    return to_tokens_fun(node)
def ast2command(node, loose_constraints=False, ignore_flag_order=False):
    """Serialize an AST back into a bash command string (delegates to lint)."""
    return lint.serialize_ast(node, loose_constraints=loose_constraints,
                              ignore_flag_order=ignore_flag_order)
def ast2template(node, loose_constraints=False, ignore_flag_order=False,
                 arg_type_only=True, indexing_args=False,
                 keep_common_args=False):
    """
    Render an AST as a template string containing only reserved words and
    argument types; flags are alphabetically ordered.
    """
    template_tokens = ast2tokens(
        node, loose_constraints, ignore_flag_order,
        arg_type_only=arg_type_only,
        indexing_args=indexing_args,
        keep_common_args=keep_common_args)
    return ' '.join(template_tokens)
def cmd2template(cmd, recover_quotation=True, arg_type_only=True,
                 loose_constraints=False, verbose=False):
    """
    Parse a bash command string and render it as a template containing only
    reserved words and argument types; flags are alphabetically ordered.
    """
    ast = lint.normalize_ast(cmd, recover_quotation, verbose=verbose)
    return ast2template(ast, loose_constraints=loose_constraints,
                        arg_type_only=arg_type_only)
def pretty_print(node, depth=0):
    """
    Pretty print the AST: one node per line as KIND(value), indented one
    space per tree level; arguments also show their <arg_type>.

    Nodes lacking the expected attributes print a blank (indent-only) line.
    """
    try:
        # Fix: the local was previously named `str`, shadowing the builtin.
        line = " " * depth + node.kind.upper() + '(' + node.value + ')'
        if node.is_argument():
            line += '<' + node.arg_type + '>'
        print(line)
        for child in node.children:
            pretty_print(child, depth + 1)
    except AttributeError:
        # Best-effort fallback for malformed nodes: keep the indentation only.
        print(" " * depth)
def ast2list(node, order='dfs', _list=None, ignore_flag_order=False,
             arg_type_only=False, keep_common_args=False,
             with_flag_head=False, with_prefix=False):
    """
    Linearize the AST (depth-first).

    Appends one token per node to the accumulator; nast._H_NO_EXPAND /
    nast._V_NO_EXPAND markers record where a node's children end so the
    tree shape can be recovered from the flat list.

    :param _list: accumulator used by the recursive calls; a fresh list
        is created when omitted.
    :param ignore_flag_order: sort a utility's children by value.
    :param arg_type_only: emit the argument type instead of the value for
        open-vocabulary arguments.
    :param keep_common_args: accepted for interface compatibility; unused.
    :param with_flag_head: prefix option tokens with their utility name.
    :param with_prefix: prepend node.prefix to every token.
    :return: the accumulator list.
    """
    if _list is None:
        # Bug fix: the original dereferenced _list unconditionally, so
        # calling ast2list(node) without an accumulator raised
        # AttributeError on None.
        _list = []
    if order == 'dfs':
        if node.is_argument() and node.is_open_vocab() and arg_type_only:
            token = node.arg_type
        elif node.is_option() and with_flag_head:
            token = node.utility.value + '@@' + node.value if node.utility \
                else node.value
        else:
            token = node.value
        if with_prefix:
            token = node.prefix + token
        _list.append(token)
        if node.get_num_of_children() > 0:
            if node.is_utility() and ignore_flag_order:
                children = sorted(node.children, key=lambda x: x.value)
            else:
                children = node.children
            for child in children:
                ast2list(child, order, _list, ignore_flag_order, arg_type_only,
                         keep_common_args, with_flag_head, with_prefix)
            _list.append(nast._H_NO_EXPAND)
        else:
            _list.append(nast._V_NO_EXPAND)
    return _list
# --- Other syntax parsers --- #
def paren_parser(line):
    """A simple parser for parenthesized sequences.

    Turns a string such as "( a ( b c ) )" into a tree of nast.Node
    objects under a "root" node; operands of "and"/"or" nodes are sorted
    so that equivalent inputs produce identical trees.
    """
    def sort_bool_operands(node):
        # Depth-first: children are normalized before their parent.
        for child in node.children:
            sort_bool_operands(child)
        if len(node.children) > 1 and node.children[0].value in ["and", "or"]:
            def operand_key(child):
                if child.kind == "t":
                    return child.value
                if child.children:
                    return child.children[0].value
                return child.value
            node.children = node.children[:1] + \
                sorted(node.children[1:], key=operand_key)

    # Make sure the whole expression is wrapped in one outer pair.
    if not line.startswith("("):
        line = "( " + line
    if not line.endswith(")"):
        line = line + " )"

    root = nast.Node(kind="root", value="root")
    stack = []
    for word in line.strip().split():
        if word == "(":
            if not stack:
                stack.append(root)
            else:
                # Nested parenthesis: introduce a non-terminal node.
                nt = nast.Node(kind="nt", value="<n>")
                stack[-1].add_child(nt)
                nt.parent = stack[-1]
                stack.append(nt)
        elif word == ")":
            if stack:
                stack.pop()
        else:
            leaf = nast.Node(kind="t", value=word)
            stack[-1].add_child(leaf)
            leaf.parent = stack[-1]
        if not stack:
            # The outermost parenthesis was closed; ignore trailing text.
            break

    sort_bool_operands(root)
    return root
# --- Test functions --- #
def batch_parse(input_file):
    """
    Parse input_file, each line of which is a bash command; prints the
    line index, the command and a pretty-printed AST for each one.
    """
    with open(input_file) as f:
        # Idiom fix: enumerate replaces the hand-maintained counter.
        for i, cmd in enumerate(f):
            print("{}. {}".format(i, cmd))
            ast = bash_parser(cmd)
            pretty_print(ast)
def test_bash_parser():
    """Interactive REPL: read a bash command, print its AST, the
    utilities it uses, its template and its serialized form.
    Terminates on EOF (Ctrl-D).  The commented-out lines are debugging
    aids for the pruned-AST / search-history code paths."""
    while True:
        try:
            cmd = input("> ")
            norm_tree = bash_parser(cmd)
            # pruned_tree = normalizer.prune_ast(norm_tree)
            print()
            print("AST:")
            pretty_print(norm_tree, 0)
            # print("Pruned AST:")
            # pretty_print(pruned_tree, 0)
            # search_history = ast2list(norm_tree, 'dfs', list=[])
            # for state in search_history:
            #     print(state)
            print(get_utilities(norm_tree))
            print("Command Template:")
            print(ast2template(norm_tree, ignore_flag_order=False))
            print("Command: ")
            print(ast2command(norm_tree, ignore_flag_order=False))
            # print("Pruned Command Template:")
            # print(ast2template(pruned_tree, ignore_flag_order=False))
            print()
        except EOFError as ex:
            # end of input: leave the loop ('ex' is intentionally unused)
            break
if __name__ == "__main__":
    # Run the interactive parser REPL; batch mode over a file of commands
    # is available via batch_parse (commented out below).
    # input_file = sys.argv[1]
    # batch_parse(input_file)
    test_bash_parser()
| gpl-3.0 |
verdurin/easybuild-easyblocks | easybuild/easyblocks/h/hdf5.py | 10 | 4386 | ##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing HDF5, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_HDF5(ConfigureMake):
    """Support for building/installing HDF5"""

    def configure_step(self):
        """Configure build: set require config and make options, and run configure script."""
        # Every dependency must resolve to an installation prefix; each
        # one contributes its own --with-* configure option.
        for dep_name, config_opt in [("Szip", "--with-szlib"), ("zlib", "--with-zlib")]:
            prefix = get_software_root(dep_name)
            if not prefix:
                raise EasyBuildError("Dependency module %s not loaded.", dep_name)
            self.cfg.update('configopts', '%s=%s' % (config_opt, prefix))
        fcomp = 'FC="%s"' % os.getenv('F90')
        self.cfg.update('configopts', "--with-pic --with-pthread --enable-shared")
        self.cfg.update('configopts', "--enable-cxx --enable-fortran %s" % fcomp)
        # Combining MPI with C++ support is untested by HDF5 upstream, so
        # it requires --enable-unsupported; usempi is also falsy when the
        # toolchain has no MPI support at all.
        use_mpi = self.toolchain.options.get('usempi', None)
        if use_mpi:
            self.cfg.update('configopts', "--enable-unsupported --enable-parallel")
        else:
            self.cfg.update('configopts', "--disable-parallel")
        # make options
        self.cfg.update('buildopts', fcomp)
        if use_mpi:
            # RUNPARALLEL is set only when MPI is enabled: parallel test
            # runs then go through mpirun.
            env.setvar('RUNPARALLEL', 'mpirun -np \$\${NPROCS:=2}')
        # default make and make install are ok
        super(EB_HDF5, self).configure_step()

    def sanity_check_step(self):
        """
        Custom sanity check for HDF5
        """
        # usempi is also falsy when the toolchain lacks MPI support
        if self.toolchain.options.get('usempi', None):
            extra_binaries = ["bin/%s" % tool for tool in ["h5perf", "h5pcc", "h5pfc", "ph5diff"]]
        else:
            extra_binaries = ["bin/%s" % tool for tool in ["h5cc", "h5fc"]]
        h5_tools = ["2gif", "c++", "copy", "debug", "diff",
                    "dump", "import", "jam", "ls", "mkgrp",
                    "perf_serial", "redeploy", "repack",
                    "repart", "stat", "unjam"]
        lib_variants = ["_cpp", "_fortran", "_hl_cpp", "_hl", "hl_fortran", ""]
        binaries = ["bin/h5%s" % tool for tool in h5_tools] + ["bin/gif2h5"]
        libraries = ["lib/libhdf5%s.so" % v for v in lib_variants]
        custom_paths = {
            'files': binaries + extra_binaries + libraries,
            'dirs': ['include'],
        }
        super(EB_HDF5, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
rokuz/omim | tools/python/booking_hotels_quality.py | 20 | 2632 | #!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from collections import namedtuple, defaultdict
from datetime import datetime
from sklearn import metrics
import argparse
import base64
import json
import logging
import matplotlib.pyplot as plt
import os
import pickle
import time
import urllib2
import re
# init logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s')
def load_binary_list(path):
    """Load the reference binary classifier output.

    Blank lines and lines starting with '#' are skipped; every remaining
    line counts as 1 when it starts with 'y' and as 0 otherwise.
    """
    with open(path, 'r') as fd:
        return [int(line.startswith('y'))
                for line in fd
                if line.strip() and not line.startswith('#')]
def load_score_list(path):
    """Load the list of matching scores.

    Blank lines and lines starting with '#' are skipped; every other
    line must contain a 'result score: <float>' fragment.
    """
    # compiled once outside the loop instead of re-searching with the
    # literal pattern on every line
    pattern = re.compile(r'result score: (\d*\.\d+)')
    scores = []
    with open(path, 'r') as fd:
        for line in fd:
            if not line.strip() or line.startswith('#'):
                continue
            scores.append(float(pattern.search(line).group(1)))
    return scores
def process_options():
    """Build and parse the command-line options for this tool.

    Prints the help text and exits when either --reference_list or
    --sample_list is missing.
    """
    # TODO(mgsergio): Fix description.
    parser = argparse.ArgumentParser(
        description="Download and process booking hotels.")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true")
    parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
    parser.add_argument("--reference_list", dest="reference_list",
                        help="Path to data files")
    parser.add_argument("--sample_list", dest="sample_list",
                        help="Name and destination for output file")
    parser.add_argument("--show", dest="show", action="store_true",
                        default=False,
                        help="Show graph for precision and recall")
    options = parser.parse_args()
    if not (options.reference_list and options.sample_list):
        parser.print_help()
        exit()
    return options
def main():
    """Evaluate the quality of the booking-hotels matching scores.

    Loads the reference labels and the matcher scores, prints the
    threshold maximizing the harmonic mean of precision and recall plus
    the ROC AUC, and optionally shows the precision/recall curve.
    """
    options = process_options()
    reference = load_binary_list(options.reference_list)
    sample = load_score_list(options.sample_list)

    precision, recall, threshold = metrics.precision_recall_curve(reference, sample)
    aa = zip(precision, recall, threshold)
    # Fix: the original used a tuple-unpacking lambda parameter
    # (lambda (p, r, t): ...), a syntax error under Python 3 (PEP 3113);
    # index into the tuple instead.  p*r/(p+r) is proportional to the
    # harmonic mean of precision and recall.
    max_by_hmean = max(aa, key=lambda prt: prt[0] * prt[1] / (prt[0] + prt[1]))
    print("Optimal threshold: {2} for precision: {0} and recall: {1}".format(*max_by_hmean))
    print("AUC: {0}".format(metrics.roc_auc_score(reference, sample)))

    if options.show:
        plt.plot(recall, precision)
        plt.title("Precision/Recall")
        plt.ylabel("Precision")
        plt.xlabel("Recall")
        plt.show()

if __name__ == "__main__":
    main()
| apache-2.0 |
Pi03k/py3specto | spectlib/plugins/watch_system_port.py | 2 | 3542 | # -*- coding: utf-8 -*-
# Specto , Unobtrusive event notifier
#
# watch_system_port.py
#
# See the AUTHORS file for copyright ownership information
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from spectlib.watch import Watch
import spectlib.config
import os
type = "Watch_system_port"
type_desc = _("Port")
icon = 'network-transmit-receive'
category = _("System")
def get_add_gui_info():
    # Fields for Specto's "add watch" dialog: a single spin button selecting
    # the TCP port to monitor (default 21, capped at the maximum port 65535).
    return [("port", spectlib.gtkconfig.Spinbutton(_("Port"), value=21, upper=65535))]
class Watch_system_port(Watch):
    """
    Watch class that will check if a connection was established on a certain port
    """
    def __init__(self, specto, id, values):
        watch_values = [("port", spectlib.config.Integer(True))]
        self.icon = icon
        self.standard_open_command = ''
        self.type_desc = type_desc
        self.status = ""
        #Init the superclass and set some specto values
        Watch.__init__(self, specto, id, values, watch_values)
        # Remember the initial state so the first poll only reports a
        # transition, not the pre-existing situation.
        self.running = self.check_port()

    def check(self):
        """ See if a socket was opened or closed. """
        try:
            established = self.check_port()
            if established == self.running:
                # no transition since the last poll
                self.actually_changed = False
                self.status = _("Unknown")
            elif established:
                self.running = True
                self.actually_changed = True
                self.status = _("Open")
            else:
                self.running = False
                self.actually_changed = True
                self.status = _("Closed")
        except:
            self.set_error()
        Watch.timer_update(self)

    def check_port(self):
        """ see if there is a connection on the port or not """
        # "netstat -nt" lists numeric TCP connections; drop the two
        # header lines before parsing.
        output = os.popen("netstat -nt", "r").read().splitlines()
        del output[0]
        del output[0]
        for entry in output:
            # split on spaces and discard the empty fields produced by
            # the column padding
            fields = [field for field in entry.split(' ') if field]
            try:
                # local address is column 3, port is after the last ':'
                local_port = int(fields[3][fields[3].rfind(':') + 1:])
            except:
                local_port = -1
            if local_port == int(self.port):
                return True
        return False

    def get_gui_info(self):
        # rows shown in Specto's watch-details pane
        return [
            (_('Name'), self.name),
            (_('Last changed'), self.last_changed),
            (_('Port'), self.port),
            (_('Status'), self.status),
        ]

    def get_balloon_text(self):
        """ create the text for the balloon """
        if self.running:
            return _("The network socket on port %s was established.") % self.port
        return _("The network socket on port %s was closed.") % self.port
| gpl-2.0 |
anthrotype/robofab | Lib/robofab/tools/otFeatures.py | 9 | 4577 | """Simple module to write features to font"""
import string
from types import StringType, ListType, TupleType
from robofab.world import world
if world.inFontLab:
from FL import *
from fl_cmd import *
from robofab.tools.toolsFL import FontIndex
#feat = []
#feat.append('feature smcp {')
#feat.append('\tlookup SMALLCAPS {')
#feat.append('\t\tsub @LETTERS_LC by @LETTERS_LC;')
#feat.append('\t} SMALLCAPS;')
#feat.append('} smcp;')
class FeatureWriter:
    """Make properly formatted OpenType feature code."""
    def __init__(self, type):
        # type: the feature tag, e.g. 'smcp', 'aalt' or 'liga'
        self.type = type
        self.data = []
    def add(self, src, dst):
        """Add a substitution: change src to dst.

        src and dst may each be a glyph-name string or a list/tuple of
        glyph names.
        """
        self.data.append((src, dst))
    def write(self, group=0):
        """Render the feature block as a single string.

        When group is true, multi-glyph sequences are wrapped in []
        (an OpenType glyph class); otherwise they are space-separated.
        Returns None when no substitutions were added.
        """
        if len(self.data) == 0:
            return None
        t = []
        t.append('feature %s {' % self.type)
        for src, dst in self.data:
            # Fix: use str methods instead of the string-module functions
            # (string.join/string.strip were removed in Python 3).
            if isinstance(src, (list, tuple)):
                src = "[%s]" % ' '.join(src) if group else ' '.join(src)
            if isinstance(dst, (list, tuple)):
                dst = "[%s]" % ' '.join(dst) if group else ' '.join(dst)
            t.append("\tsub %s by %s;" % (src.strip(), dst.strip()))
        t.append('}%s;' % self.type)
        return '\n'.join(t)
class GlyphName:
    """Split a glyph name into handy parts, accessible as attributes.

    Attributes:
        name:   the full glyph name as given.
        base:   the part before the first period (the whole name if none).
        suffix: list of period-separated extensions ([] when none).
        ligs:   underscore-separated components of the base ([] when none).
        bits:   all period-separated pieces; only set when the name
                contains a period.
    """
    def __init__(self, name):
        self.name = name
        self.base = name
        self.suffix = []
        self.ligs = []
        if '.' in name:
            pieces = name.split('.')
            # keep the raw split available as well
            self.bits = pieces
            self.base, self.suffix = pieces[0], pieces[1:]
        if '_' in name:
            self.ligs = self.base.split('_')
def GetAlternates(font, flavor="alt", match=0):
    """Sort the glyphs of this font by the parts of the name.
    flavor is the bit to look for, i.e. 'alt' in a.alt
    match = 1 if you want a exact match: alt1 != alt
    match = 0 if the flavor is a partial match: alt == alt1
    Returns a dict mapping each base name to the (possibly empty) list of
    matching alternate glyph names.
    """
    names = {}
    for c in font.glyphs:
        name = GlyphName(c.name)
        # Fix: dict.has_key() was removed in Python 3; setdefault does the
        # same "create bucket on first sight" in one step.
        candidates = names.setdefault(name.base, [])
        if match:
            # only include if there is an exact match
            if flavor in name.suffix:
                candidates.append(c.name)
        else:
            # include if there is a partial match
            for a in name.suffix:
                if a.find(flavor) != -1:
                    candidates.append(c.name)
    return names
# XXX there should be a more generic glyph finder.
def MakeCapsFeature(font):
    """Build a 'smcp' (smallcaps) feature based on exact .sc glyph-name
    suffixes and append it to font.features; returns the feature text
    (None when no smallcap glyphs were found)."""
    names = GetAlternates(font, 'sc', match=1)
    fw = FeatureWriter('smcp')
    # Fix: iterate sorted(names) instead of the Python-2-only
    # k = names.keys(); k.sort() idiom (dict.keys() is a view in Python 3).
    for p in sorted(names):
        if names[p]:
            fw.add(p, names[p])
    feat = fw.write()
    if feat:
        font.features.append(Feature('smcp', feat))
    return feat
def MakeAlternatesFeature(font):
    """Build an 'aalt' feature from glyph names with an 'alt'-like suffix
    and append it to font.features; returns the feature text (None when
    no alternates were found)."""
    names = GetAlternates(font, 'alt', match=0)
    fw = FeatureWriter('aalt')
    # Fix: iterate sorted(names) instead of the Python-2-only
    # keys()/sort() idiom (dict.keys() is a view in Python 3).
    for p in sorted(names):
        if names[p]:
            fw.add(p, names[p])
    # group=1 emits multi-glyph targets as a [glyph class]
    feat = fw.write(group=1)
    if feat:
        font.features.append(Feature('aalt', feat))
    return feat
def MakeSwashFeature(font):
    """Build a 'swsh' feature from glyph names with a 'swash'-like suffix
    and append it to font.features; returns the feature text (None when
    no swash glyphs were found)."""
    names = GetAlternates(font, 'swash', match=0)
    fw = FeatureWriter('swsh')
    # Fix: sorted()/min() replace the Python-2-only keys()/sort() and
    # in-place list sort; min() picks the alphabetically first variant,
    # exactly as sorted(l)[0] did.
    for p in sorted(names):
        if names[p]:
            fw.add(p, min(names[p]))
    feat = fw.write()
    if feat:
        font.features.append(Feature('swsh', feat))
    return feat
def MakeLigaturesFeature(font):
    """Build a 'liga' feature based on glyph names and append it to
    font.features; returns the feature text (None when no ligatures
    were found).

    Ligature glyphs are recognized either by being listed in
    robofab.gString.ligatures or by containing underscores.  They are
    grouped by component count and emitted shortest-first, each as a
    'sub a b by a_b;' rule.
    """
    from robofab.gString import ligatures
    ligCountDict = {}
    for glyph in font.glyphs:
        if glyph.name in ligatures:
            # Known ligature name without underscores: the original code
            # uses the length of the name as the component count.
            ligCountDict.setdefault(len(glyph.name), []).append(glyph.name)
        elif '_' in glyph.name:
            # component count = number of underscores + 1
            count = glyph.name.count('_') + 1
            ligCountDict.setdefault(count, []).append(glyph.name)
    # Fix: sorted() over the dict and sorted copies of the buckets replace
    # the Python-2-only keys()/sort() idioms.
    foundLigs = []
    for count in sorted(ligCountDict):
        foundLigs += sorted(ligCountDict[count])
    fw = FeatureWriter('liga')
    for lig_name in foundLigs:
        if '_' in lig_name:
            sub = lig_name.split('_')
        else:
            # no underscores: each character is one component
            sub = list(lig_name)
        fw.add(sub, lig_name)
    feat = fw.write()
    if feat:
        font.features.append(Feature('liga', feat))
    return feat
if __name__ == "__main__":
    # Smoke test: build a small 'liga' feature and show the generated code.
    fw = FeatureWriter('liga')
    fw.add(['f', 'f', 'i'], ['f_f_i'])
    fw.add('f f ', 'f_f')
    fw.add(['f', 'i'], 'f_i')
    # Fix: print() call instead of the Python-2-only print statement
    # (also valid under Python 2 for a single argument).
    print(fw.write())
| bsd-3-clause |
edx/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/admin/widgets.py | 321 | 4430 | from django.conf import settings
from django.contrib.gis.gdal import OGRException
from django.contrib.gis.geos import GEOSGeometry, GEOSException
from django.forms.widgets import Textarea
from django.template import loader, Context
from django.utils import translation
# Creating a template context that contains Django settings
# values needed by admin map templates.
# (Evaluated once at import time, so the language direction reflects the
# active language when this module is first imported.)
geo_context = Context({'ADMIN_MEDIA_PREFIX' : settings.ADMIN_MEDIA_PREFIX,
                       'LANGUAGE_BIDI' : translation.get_language_bidi(),
                       })
class OpenLayersWidget(Textarea):
    """
    Renders an OpenLayers map using the WKT of the geometry.
    """
    def render(self, name, value, attrs=None):
        # Merge any extra attributes into the template parameters.
        if attrs:
            self.params.update(attrs)
        # An empty WKT string tells the JavaScript side to construct the
        # "no geometry yet" interface.
        self.params['wkt'] = ''
        # A string can reach here via a validation error on another field;
        # rebuild the geometry from it, discarding anything unparsable or
        # of the wrong geometry type.
        if isinstance(value, basestring):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError):
                value = None
        if value and value.geom_type.upper() != self.geom_type:
            value = None
        self.params['map_options'] = self.map_options()
        # The JavaScript module is named after the form field; dashes are
        # not valid in JS identifiers, so swap them for underscores.
        self.params['name'] = name
        js_safe_name = self.params['name'].replace('-', '_')
        self.params['module'] = 'geodjango_%s' % js_safe_name
        if value:
            # Reproject onto the SRID used by the OpenLayers map before
            # exporting the WKT; fall back to an empty string on failure.
            srid = self.params['srid']
            if value.srid != srid:
                try:
                    ogr = value.ogr
                    ogr.transform(srid)
                    wkt = ogr.wkt
                except OGRException:
                    wkt = ''
            else:
                wkt = value.wkt
            self.params['wkt'] = wkt
        return loader.render_to_string(self.template, self.params,
                                       context_instance=geo_context)

    def map_options(self):
        "Builds the map options hash for the OpenLayers template."
        # JavaScript constructor snippets for bounds and projections.
        def js_bounds(extent):
            return 'new OpenLayers.Bounds(%s)' % str(extent)
        def js_projection(srid):
            return 'new OpenLayers.Projection("EPSG:%s")' % srid
        # (parameter name, OpenLayers option name, value type) triples.
        option_specs = [('srid', 'projection', 'srid'),
                        ('display_srid', 'displayProjection', 'srid'),
                        ('units', 'units', str),
                        ('max_resolution', 'maxResolution', float),
                        ('max_extent', 'maxExtent', 'bounds'),
                        ('num_zoom', 'numZoomLevels', int),
                        ('max_zoom', 'maxZoomLevels', int),
                        ('min_zoom', 'minZoomLevel', int),
                        ]
        map_options = {}
        for param_name, js_name, option_type in option_specs:
            raw = self.params.get(param_name, False)
            if not raw:
                # unset (or falsy) parameters are omitted from the hash
                continue
            if option_type == 'srid':
                value = js_projection(raw)
            elif option_type == 'bounds':
                value = js_bounds(raw)
            elif option_type in (float, int):
                value = raw
            elif option_type in (str,):
                value = '"%s"' % raw
            else:
                raise TypeError
            map_options[js_name] = value
        return map_options
| gpl-3.0 |
yjmade/odoo | addons/payment_buckaroo/controllers/main.py | 325 | 1270 | # -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class BuckarooController(http.Controller):
    """Endpoints that Buckaroo redirects the customer back to after a
    payment attempt (success, cancel, error or rejection)."""
    _return_url = '/payment/buckaroo/return'
    _cancel_url = '/payment/buckaroo/cancel'
    _exception_url = '/payment/buckaroo/error'
    _reject_url = '/payment/buckaroo/reject'

    @http.route([
        '/payment/buckaroo/return',
        '/payment/buckaroo/cancel',
        '/payment/buckaroo/error',
        '/payment/buckaroo/reject',
    ], type='http', auth='none')
    def buckaroo_return(self, **post):
        """Feed Buckaroo's post data back to the payment transaction and
        redirect the customer to the requested return URL."""
        _logger.info('Buckaroo: entering form_feedback with post data %s', pprint.pformat(post)) # debug
        request.registry['payment.transaction'].form_feedback(request.cr, SUPERUSER_ID, post, 'buckaroo', context=request.context)
        redirect_target = post.pop('return_url', '')
        if not redirect_target:
            # Buckaroo echoes our custom payload back as a pseudo-JSON
            # string using single quotes; normalise the quoting before
            # decoding to recover the original return_url.
            payload = '' + post.pop('ADD_RETURNDATA', '{}').replace("'", "\"")
            custom = json.loads(payload)
            redirect_target = custom.pop('return_url', '/')
        return werkzeug.utils.redirect(redirect_target)
| agpl-3.0 |
bit-trade-one/SoundModuleAP | lib-src/lv2/lv2/waflib/Build.py | 5 | 21729 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,errno,re,shutil
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Runner,TaskGen,Utils,ConfigSet,Task,Logs,Options,Context,Errors
import waflib.Node
# Name of the configuration-cache directory and the suffix of the
# ConfigSet files stored inside it.
CACHE_DIR='c4che'
CACHE_SUFFIX='_cache.py'
# Sentinel values for BuildContext.is_install.
INSTALL=1337
UNINSTALL=-1337
# BuildContext attributes persisted in the pickled build cache (store/restore).
SAVED_ATTRS='root node_deps raw_deps task_sigs'.split()
CFG_FILES='cfg_files'
# Task-generator posting strategies used by get_build_iterator().
POST_AT_ONCE=0
POST_LAZY=1
POST_BOTH=2
class BuildContext(Context.Context):
	'''executes the build'''
	cmd='build'
	variant=''
	def __init__(self,**kw):
		super(BuildContext,self).__init__(**kw)
		self.is_install=0
		self.top_dir=kw.get('top_dir',Context.top_dir)
		self.run_dir=kw.get('run_dir',Context.run_dir)
		self.post_mode=POST_AT_ONCE
		self.out_dir=kw.get('out_dir',Context.out_dir)
		self.cache_dir=kw.get('cache_dir',None)
		if not self.cache_dir:
			self.cache_dir=self.out_dir+os.sep+CACHE_DIR
		# persistent build state (see SAVED_ATTRS) and per-run caches
		self.all_envs={}
		self.task_sigs={}
		self.node_deps={}
		self.raw_deps={}
		self.cache_dir_contents={}
		self.task_gen_cache_names={}
		self.launch_dir=Context.launch_dir
		self.jobs=Options.options.jobs
		self.targets=Options.options.targets
		self.keep=Options.options.keep
		self.cache_global=Options.cache_global
		self.nocache=Options.options.nocache
		self.progress_bar=Options.options.progress_bar
		self.deps_man=Utils.defaultdict(list)
		# task generators are stored in ordered groups
		self.current_group=0
		self.groups=[]
		self.group_names={}
	def get_variant_dir(self):
		# output directory, with the variant sub-folder appended when set
		if not self.variant:
			return self.out_dir
		return os.path.join(self.out_dir,self.variant)
	variant_dir=property(get_variant_dir,None)
	def __call__(self,*k,**kw):
		# create a task generator bound to this context (the bld(...) idiom)
		kw['bld']=self
		ret=TaskGen.task_gen(*k,**kw)
		# the name cache is invalidated by every new task generator
		self.task_gen_cache_names={}
		self.add_to_group(ret,group=kw.get('group',None))
		return ret
	def rule(self,*k,**kw):
		# decorator form: @bld.rule(...) attaches the function as the rule
		def f(rule):
			ret=self(*k,**kw)
			ret.rule=rule
			return ret
		return f
	def __copy__(self):
		raise Errors.WafError('build contexts are not supposed to be copied')
	def install_files(self,*k,**kw):
		# no-op here; overridden by InstallContext
		pass
	def install_as(self,*k,**kw):
		pass
	def symlink_as(self,*k,**kw):
		pass
	def load_envs(self):
		# read the ConfigSets written by "waf configure" from the cache dir
		node=self.root.find_node(self.cache_dir)
		if not node:
			raise Errors.WafError('The project was not configured: run "waf configure" first!')
		lst=node.ant_glob('**/*%s'%CACHE_SUFFIX,quiet=True)
		if not lst:
			raise Errors.WafError('The cache directory is empty: reconfigure the project')
		for x in lst:
			name=x.path_from(node).replace(CACHE_SUFFIX,'').replace('\\','/')
			env=ConfigSet.ConfigSet(x.abspath())
			self.all_envs[name]=env
			# pre-compute the signatures of the configuration files
			for f in env[CFG_FILES]:
				newnode=self.root.find_resource(f)
				try:
					h=Utils.h_file(newnode.abspath())
				except(IOError,AttributeError):
					Logs.error('cannot find %r'%f)
					h=Utils.SIG_NIL
				newnode.sig=h
	def init_dirs(self):
		# bind self.srcnode/self.bldnode, creating the build directory
		if not(os.path.isabs(self.top_dir)and os.path.isabs(self.out_dir)):
			raise Errors.WafError('The project was not configured: run "waf configure" first!')
		self.path=self.srcnode=self.root.find_dir(self.top_dir)
		self.bldnode=self.root.make_node(self.variant_dir)
		self.bldnode.mkdir()
	def execute(self):
		# entry point of the 'build' command
		self.restore()
		if not self.all_envs:
			self.load_envs()
		self.execute_build()
	def execute_build(self):
		Logs.info("Waf: Entering directory `%s'"%self.variant_dir)
		self.recurse([self.run_dir])
		self.pre_build()
		self.timer=Utils.Timer()
		if self.progress_bar:
			sys.stderr.write(Logs.colors.cursor_off)
		try:
			self.compile()
		finally:
			if self.progress_bar==1:
				# finish the progress bar line and restore the cursor
				c=len(self.returned_tasks)or 1
				self.to_log(self.progress_line(c,c,Logs.colors.BLUE,Logs.colors.NORMAL))
				print('')
				sys.stdout.flush()
				sys.stderr.write(Logs.colors.cursor_on)
			Logs.info("Waf: Leaving directory `%s'"%self.variant_dir)
		self.post_build()
	def restore(self):
		# reload the persisted state: config environment, then pickled caches
		try:
			env=ConfigSet.ConfigSet(os.path.join(self.cache_dir,'build.config.py'))
		except(IOError,OSError):
			pass
		else:
			if env['version']<Context.HEXVERSION:
				raise Errors.WafError('Version mismatch! reconfigure the project')
			for t in env['tools']:
				self.setup(**t)
		dbfn=os.path.join(self.variant_dir,Context.DBFILE)
		try:
			data=Utils.readf(dbfn,'rb')
		except(IOError,EOFError):
			Logs.debug('build: Could not load the build cache %s (missing)'%dbfn)
		else:
			try:
				# Nod3 must point at the right Node class while unpickling
				waflib.Node.pickle_lock.acquire()
				waflib.Node.Nod3=self.node_class
				try:
					data=cPickle.loads(data)
				except Exception ,e:
					Logs.debug('build: Could not pickle the build cache %s: %r'%(dbfn,e))
				else:
					for x in SAVED_ATTRS:
						setattr(self,x,data[x])
			finally:
				waflib.Node.pickle_lock.release()
		self.init_dirs()
	def store(self):
		# pickle SAVED_ATTRS atomically (write to .tmp, then rename over)
		data={}
		for x in SAVED_ATTRS:
			data[x]=getattr(self,x)
		db=os.path.join(self.variant_dir,Context.DBFILE)
		try:
			waflib.Node.pickle_lock.acquire()
			waflib.Node.Nod3=self.node_class
			x=cPickle.dumps(data,-1)
		finally:
			waflib.Node.pickle_lock.release()
		Utils.writef(db+'.tmp',x,m='wb')
		try:
			# preserve the ownership of the previous cache file when possible
			st=os.stat(db)
			os.remove(db)
			if not Utils.is_win32:
				os.chown(db+'.tmp',st.st_uid,st.st_gid)
		except(AttributeError,OSError):
			pass
		os.rename(db+'.tmp',db)
	def compile(self):
		# run the parallel scheduler over the batches from get_build_iterator
		Logs.debug('build: compile()')
		self.producer=Runner.Parallel(self,self.jobs)
		self.producer.biter=self.get_build_iterator()
		self.returned_tasks=[]
		try:
			self.producer.start()
		except KeyboardInterrupt:
			# save what was done before propagating the interruption
			self.store()
			raise
		else:
			if self.producer.dirty:
				self.store()
		if self.producer.error:
			raise Errors.BuildError(self.producer.error)
	def setup(self,tool,tooldir=None,funs=None):
		# import a waf tool and run its setup function if it defines one
		if isinstance(tool,list):
			for i in tool:self.setup(i,tooldir)
			return
		module=Context.load_tool(tool,tooldir)
		if hasattr(module,"setup"):module.setup(self)
	def get_env(self):
		# the ConfigSet for the current variant, defaulting to ''
		try:
			return self.all_envs[self.variant]
		except KeyError:
			return self.all_envs['']
	def set_env(self,val):
		self.all_envs[self.variant]=val
	env=property(get_env,set_env)
	def add_manual_dependency(self,path,value):
		# bind an extra dependency (node or raw value) to a file node
		if path is None:
			raise ValueError('Invalid input')
		if isinstance(path,waflib.Node.Node):
			node=path
		elif os.path.isabs(path):
			node=self.root.find_resource(path)
		else:
			node=self.path.find_resource(path)
		if isinstance(value,list):
			self.deps_man[id(node)].extend(value)
		else:
			self.deps_man[id(node)].append(value)
	def launch_node(self):
		# node of the directory waf was launched from (cached in p_ln)
		try:
			return self.p_ln
		except AttributeError:
			self.p_ln=self.root.find_dir(self.launch_dir)
			return self.p_ln
	def hash_env_vars(self,env,vars_lst):
		# hash selected environment variables, memoized per (env, vars) pair
		if not env.table:
			env=env.parent
			if not env:
				return Utils.SIG_NIL
		idx=str(id(env))+str(vars_lst)
		try:
			cache=self.cache_env
		except AttributeError:
			cache=self.cache_env={}
		else:
			try:
				return self.cache_env[idx]
			except KeyError:
				pass
		lst=[env[a]for a in vars_lst]
		ret=Utils.h_list(lst)
		Logs.debug('envhash: %s %r',Utils.to_hex(ret),lst)
		cache[idx]=ret
		return ret
	def get_tgen_by_name(self,name):
		# find a task generator by name, building the name cache lazily
		cache=self.task_gen_cache_names
		if not cache:
			for g in self.groups:
				for tg in g:
					try:
						cache[tg.name]=tg
					except AttributeError:
						pass
		try:
			return cache[name]
		except KeyError:
			raise Errors.WafError('Could not find a task generator for the name %r'%name)
	def progress_line(self,state,total,col1,col2):
		# render one line of the textual progress bar
		n=len(str(total))
		Utils.rot_idx+=1
		ind=Utils.rot_chr[Utils.rot_idx%4]
		pc=(100.*state)/total
		eta=str(self.timer)
		fs="[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s]["%(n,n,ind)
		left=fs%(state,total,col1,pc,col2)
		right='][%s%s%s]'%(col1,eta,col2)
		cols=Logs.get_term_cols()-len(left)-len(right)+2*len(col1)+2*len(col2)
		if cols<7:cols=7
		ratio=((cols*state)//total)-1
		bar=('='*ratio+'>').ljust(cols)
		msg=Utils.indicator%(left,bar,right)
		return msg
	def declare_chain(self,*k,**kw):
		return TaskGen.declare_chain(*k,**kw)
	def pre_build(self):
		# run callbacks registered through add_pre_fun
		for m in getattr(self,'pre_funs',[]):
			m(self)
	def post_build(self):
		# run callbacks registered through add_post_fun
		for m in getattr(self,'post_funs',[]):
			m(self)
	def add_pre_fun(self,meth):
		try:
			self.pre_funs.append(meth)
		except AttributeError:
			self.pre_funs=[meth]
	def add_post_fun(self,meth):
		try:
			self.post_funs.append(meth)
		except AttributeError:
			self.post_funs=[meth]
	def get_group(self,x):
		# accept a group object, a registered name, or a numeric index
		if not self.groups:
			self.add_group()
		if x is None:
			return self.groups[self.current_group]
		if x in self.group_names:
			return self.group_names[x]
		return self.groups[x]
	def add_to_group(self,tgen,group=None):
		assert(isinstance(tgen,TaskGen.task_gen)or isinstance(tgen,Task.TaskBase))
		tgen.bld=self
		self.get_group(group).append(tgen)
	def get_group_name(self,g):
		# reverse lookup: group (or its index) -> registered name
		if not isinstance(g,list):
			g=self.groups[g]
		for x in self.group_names:
			if id(self.group_names[x])==id(g):
				return x
		return''
	def get_group_idx(self,tg):
		# index of the group containing the given task generator
		se=id(tg)
		for i in range(len(self.groups)):
			for t in self.groups[i]:
				if id(t)==se:
					return i
		return None
	def add_group(self,name=None,move=True):
		# append a new (optionally named) group; move selects it as current
		if name and name in self.group_names:
			Logs.error('add_group: name %s already present'%name)
		g=[]
		self.group_names[name]=g
		self.groups.append(g)
		if move:
			self.current_group=len(self.groups)-1
	def set_group(self,idx):
		# select the current group by name or numeric index
		if isinstance(idx,str):
			g=self.group_names[idx]
			for i in range(len(self.groups)):
				if id(g)==id(self.groups[i]):
					self.current_group=i
		else:
			self.current_group=idx
	def total(self):
		# approximate task count, used to size the progress bar
		total=0
		for group in self.groups:
			for tg in group:
				try:
					total+=len(tg.tasks)
				except AttributeError:
					total+=1
		return total
	def get_targets(self):
		# resolve --targets=a,b into (deepest group index, generators to post)
		to_post=[]
		min_grp=0
		for name in self.targets.split(','):
			tg=self.get_tgen_by_name(name)
			if not tg:
				raise Errors.WafError('target %r does not exist'%name)
			m=self.get_group_idx(tg)
			if m>min_grp:
				min_grp=m
				to_post=[tg]
			elif m==min_grp:
				to_post.append(tg)
		return(min_grp,to_post)
	def get_all_task_gen(self):
		lst=[]
		for g in self.groups:
			lst.extend(g)
		return lst
	def post_group(self):
		# call post() on the generators of the current group, restricted
		# by --targets and by the directory waf was launched from
		if self.targets=='*':
			for tg in self.groups[self.cur]:
				try:
					f=tg.post
				except AttributeError:
					pass
				else:
					f()
		elif self.targets:
			if self.cur<self._min_grp:
				for tg in self.groups[self.cur]:
					try:
						f=tg.post
					except AttributeError:
						pass
					else:
						f()
			else:
				for tg in self._exact_tg:
					tg.post()
		else:
			ln=self.launch_node()
			if ln.is_child_of(self.bldnode):
				Logs.warn('Building from the build directory, forcing --targets=*')
				ln=self.srcnode
			elif not ln.is_child_of(self.srcnode):
				Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)'%(ln.abspath(),self.srcnode.abspath()))
				ln=self.srcnode
			for tg in self.groups[self.cur]:
				try:
					f=tg.post
				except AttributeError:
					pass
				else:
					if tg.path.is_child_of(ln):
						f()
	def get_tasks_group(self,idx):
		# flatten the tasks of the generators in group idx
		tasks=[]
		for tg in self.groups[idx]:
			try:
				tasks.extend(tg.tasks)
			except AttributeError:
				tasks.append(tg)
		return tasks
	def get_build_iterator(self):
		# generator yielding batches of ready task objects, group by group
		self.cur=0
		if self.targets and self.targets!='*':
			(self._min_grp,self._exact_tg)=self.get_targets()
		global lazy_post
		if self.post_mode!=POST_LAZY:
			# POST_AT_ONCE/POST_BOTH: post all groups before running any
			while self.cur<len(self.groups):
				self.post_group()
				self.cur+=1
			self.cur=0
		while self.cur<len(self.groups):
			if self.post_mode!=POST_AT_ONCE:
				self.post_group()
			tasks=self.get_tasks_group(self.cur)
			Task.set_file_constraints(tasks)
			Task.set_precedence_constraints(tasks)
			self.cur_tasks=tasks
			self.cur+=1
			if not tasks:
				continue
			yield tasks
		# keep yielding empty batches so the consumer never blocks
		while 1:
			yield[]
class inst(Task.Task):
	# installation pseudo-task created by install_files/install_as/symlink_as
	color='CYAN'
	def uid(self):
		# identity derived from destination, path and the source list
		lst=[self.dest,self.path]+self.source
		return Utils.h_list(repr(lst))
	def post(self):
		# resolve self.source entries into nodes, posting the other task
		# generators of the group when a file is not produced yet
		buf=[]
		for x in self.source:
			if isinstance(x,waflib.Node.Node):
				y=x
			else:
				y=self.path.find_resource(x)
				if not y:
					if Logs.verbose:
						Logs.warn('Could not find %s immediately (may cause broken builds)'%x)
					idx=self.generator.bld.get_group_idx(self)
					for tg in self.generator.bld.groups[idx]:
						if not isinstance(tg,inst)and id(tg)!=id(self):
							tg.post()
						y=self.path.find_resource(x)
						if y:
							break
					else:
						raise Errors.WafError('Could not find %r in %r'%(x,self.path))
			buf.append(y)
		self.inputs=buf
	def runnable_status(self):
		# always run, even when the signature check would skip the task
		ret=super(inst,self).runnable_status()
		if ret==Task.SKIP_ME:
			return Task.RUN_ME
		return ret
	def __str__(self):
		return''
	def run(self):
		return self.generator.exec_task()
	def get_install_path(self,destdir=True):
		# expand configuration variables in self.dest and prepend the
		# --destdir prefix when requested
		dest=Utils.subst_vars(self.dest,self.env)
		dest=dest.replace('/',os.sep)
		if destdir and Options.options.destdir:
			dest=os.path.join(Options.options.destdir,os.path.splitdrive(dest)[1].lstrip(os.sep))
		return dest
	def exec_install_files(self):
		# copy each input node below the destination directory
		destpath=self.get_install_path()
		if not destpath:
			raise Errors.WafError('unknown installation path %r'%self.generator)
		for x,y in zip(self.source,self.inputs):
			if self.relative_trick:
				# preserve the directory structure relative to self.path
				destfile=os.path.join(destpath,y.path_from(self.path))
			else:
				destfile=os.path.join(destpath,y.name)
			self.generator.bld.do_install(y.abspath(),destfile,self.chmod)
	def exec_install_as(self):
		# install the single input under the exact destination name
		destfile=self.get_install_path()
		self.generator.bld.do_install(self.inputs[0].abspath(),destfile,self.chmod)
	def exec_symlink_as(self):
		# create a symlink at the destination pointing at self.link
		destfile=self.get_install_path()
		src=self.link
		if self.relative_trick:
			src=os.path.relpath(src,os.path.dirname(destfile))
		self.generator.bld.do_link(src,destfile)
class InstallContext(BuildContext):
    '''installs the targets on the system'''
    cmd='install'

    def __init__(self,**kw):
        super(InstallContext,self).__init__(**kw)
        # files processed so far; consulted by UninstallContext
        self.uninstall=[]
        self.is_install=INSTALL

    def do_install(self,src,tgt,chmod=Utils.O644):
        """Copy *src* to *tgt*, creating directories and applying *chmod*.

        Returns False (and skips the copy) when the target already looks up
        to date -- same size and a recent enough mtime -- unless ``--force``
        was given. Raises Errors.WafError on an invalid or failed install.
        """
        d,_=os.path.split(tgt)
        if not d:
            raise Errors.WafError('Invalid installation given %r->%r'%(src,tgt))
        Utils.check_dir(d)
        srclbl=src.replace(self.srcnode.abspath()+os.sep,'')
        if not Options.options.force:
            # skip the copy when the destination looks up to date
            try:
                st1=os.stat(tgt)
                st2=os.stat(src)
            except OSError:
                pass
            else:
                if st1.st_mtime+2>=st2.st_mtime and st1.st_size==st2.st_size:
                    if not self.progress_bar:
                        Logs.info('- install %s (from %s)'%(tgt,srclbl))
                    return False
        if not self.progress_bar:
            Logs.info('+ install %s (from %s)'%(tgt,srclbl))
        # remove the target first so the copy cannot follow a stale symlink
        try:
            os.remove(tgt)
        except OSError:
            pass
        try:
            shutil.copy2(src,tgt)
            os.chmod(tgt,chmod)
        except IOError:
            try:
                os.stat(src)
            except (OSError,IOError):
                Logs.error('File %r does not exist'%src)
            raise Errors.WafError('Could not install the file %r'%tgt)

    def do_link(self,src,tgt):
        """Create or refresh the symbolic link *tgt* pointing at *src*."""
        d,_=os.path.split(tgt)
        Utils.check_dir(d)
        link=False
        if not os.path.islink(tgt):
            link=True
        elif os.readlink(tgt)!=src:
            # existing link points elsewhere: recreate it
            link=True
        if link:
            try:os.remove(tgt)
            except OSError:pass
            if not self.progress_bar:
                Logs.info('+ symlink %s (to %s)'%(tgt,src))
            os.symlink(src,tgt)
        else:
            if not self.progress_bar:
                Logs.info('- symlink %s (to %s)'%(tgt,src))

    def run_task_now(self,tsk,postpone):
        """Post *tsk*, and execute it immediately unless *postpone* is set."""
        tsk.post()
        if not postpone:
            if tsk.runnable_status()==Task.ASK_LATER:
                # BUGFIX: was "raise self.WafError(...)" -- BuildContext has
                # no WafError attribute, so this path raised AttributeError
                # instead of the intended build error.
                raise Errors.WafError('cannot post the task %r'%tsk)
            tsk.run()

    def install_files(self,dest,files,env=None,chmod=Utils.O644,relative_trick=False,cwd=None,add=True,postpone=True):
        """Schedule *files* (node(s) or path string/list) for installation
        under directory *dest*; returns the created inst task."""
        tsk=inst(env=env or self.env)
        tsk.bld=self
        tsk.path=cwd or self.path
        tsk.chmod=chmod
        if isinstance(files,waflib.Node.Node):
            tsk.source=[files]
        else:
            tsk.source=Utils.to_list(files)
        tsk.dest=dest
        tsk.exec_task=tsk.exec_install_files
        tsk.relative_trick=relative_trick
        if add:self.add_to_group(tsk)
        self.run_task_now(tsk,postpone)
        return tsk

    def install_as(self,dest,srcfile,env=None,chmod=Utils.O644,cwd=None,add=True,postpone=True):
        """Schedule a single file for installation under the exact name *dest*."""
        tsk=inst(env=env or self.env)
        tsk.bld=self
        tsk.path=cwd or self.path
        tsk.chmod=chmod
        tsk.source=[srcfile]
        tsk.dest=dest
        tsk.exec_task=tsk.exec_install_as
        if add:self.add_to_group(tsk)
        self.run_task_now(tsk,postpone)
        return tsk

    def symlink_as(self,dest,src,env=None,cwd=None,add=True,postpone=True,relative_trick=False):
        """Schedule the creation of symlink *dest* -> *src* (no-op on win32)."""
        if Utils.is_win32:
            # symbolic links are not supported there
            return
        tsk=inst(env=env or self.env)
        tsk.bld=self
        tsk.dest=dest
        tsk.path=cwd or self.path
        tsk.source=[]
        tsk.link=src
        tsk.relative_trick=relative_trick
        tsk.exec_task=tsk.exec_symlink_as
        if add:self.add_to_group(tsk)
        self.run_task_now(tsk,postpone)
        return tsk
class UninstallContext(InstallContext):
    '''removes the targets installed'''
    cmd='uninstall'
    def __init__(self,**kw):
        super(UninstallContext,self).__init__(**kw)
        self.is_install=UNINSTALL
    def do_install(self,src,tgt,chmod=Utils.O644):
        """Remove *tgt* instead of installing it (*src*/*chmod* are unused);
        then prune any parent directories that became empty."""
        if not self.progress_bar:
            Logs.info('- remove %s'%tgt)
        self.uninstall.append(tgt)
        try:
            os.remove(tgt)
        except OSError ,e:
            if e.errno!=errno.ENOENT:
                # warn once globally; list individual failures only at -vv
                if not getattr(self,'uninstall_error',None):
                    self.uninstall_error=True
                    Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
                if Logs.verbose>1:
                    Logs.warn('Could not remove %s (error code %r)'%(e.filename,e.errno))
        # prune now-empty parent directories; rmdir fails (and we stop)
        # as soon as a non-empty directory is reached
        while tgt:
            tgt=os.path.dirname(tgt)
            try:
                os.rmdir(tgt)
            except OSError:
                break
    def do_link(self,src,tgt):
        """Remove the link *tgt* (ignoring absence) and prune empty parents."""
        try:
            if not self.progress_bar:
                Logs.info('- remove %s'%tgt)
            os.remove(tgt)
        except OSError:
            pass
        # prune now-empty parent directories, as in do_install above
        while tgt:
            tgt=os.path.dirname(tgt)
            try:
                os.rmdir(tgt)
            except OSError:
                break
    def execute(self):
        """Run a normal build pass with every task forced to SKIP_ME, so
        only the inst tasks (whose do_install/do_link are overridden above)
        have any effect; the monkeypatch is always undone in finally."""
        try:
            def runnable_status(self):
                return Task.SKIP_ME
            setattr(Task.Task,'runnable_status_back',Task.Task.runnable_status)
            setattr(Task.Task,'runnable_status',runnable_status)
            super(UninstallContext,self).execute()
        finally:
            # restore the original method even if the build failed
            setattr(Task.Task,'runnable_status',Task.Task.runnable_status_back)
class CleanContext(BuildContext):
    '''cleans the project'''
    cmd='clean'
    def execute(self):
        self.restore()
        if not self.all_envs:
            self.load_envs()
        self.recurse([self.run_dir])
        try:
            self.clean()
        finally:
            # persist the (now reset) build state even if cleaning failed
            self.store()
    def clean(self):
        """Delete build outputs, keeping the configuration files (CFG_FILES)
        and the lock/cache files matched by the exclusion pattern."""
        Logs.debug('build: clean called')
        if self.bldnode!=self.srcnode:
            # guard: cleaning an in-source build would delete project files
            lst=[]
            for e in self.all_envs.values():
                lst.extend(self.root.find_or_declare(f)for f in e[CFG_FILES])
            for n in self.bldnode.ant_glob('**/*',excl='.lock* *conf_check_*/** config.log c4che/*',quiet=True):
                if n in lst:
                    continue
                n.delete()
            # drop the cached node tree so it is rebuilt from disk
            self.root.children={}
        # forget the dependency and signature caches
        for v in'node_deps task_sigs raw_deps'.split():
            setattr(self,v,{})
class ListContext(BuildContext):
    '''lists the targets to execute'''
    cmd='list'
    def execute(self):
        """Post every task generator, then print the known target names."""
        self.restore()
        if not self.all_envs:
            self.load_envs()
        self.recurse([self.run_dir])
        self.pre_build()
        self.timer=Utils.Timer()
        for group in self.groups:
            for gen in group:
                # entries without a 'post' attribute are plain tasks
                post_fun = getattr(gen, 'post', None)
                if post_fun is not None:
                    post_fun()
        try:
            # populate task_gen_cache_names as a side effect of the lookup;
            # the empty name is never found, so the failure is expected
            self.get_tgen_by_name('')
        except Exception:
            pass
        for name in sorted(self.task_gen_cache_names):
            Logs.pprint('GREEN', name)
class StepContext(BuildContext):
    '''executes tasks in a step-by-step fashion, for debugging'''
    cmd='step'

    def __init__(self,**kw):
        super(StepContext,self).__init__(**kw)
        # comma-separated list of patterns from --files
        self.files=Options.options.files

    def compile(self):
        """Run only the tasks whose inputs/outputs match a ``--files``
        pattern; without patterns, fall back to a normal compile."""
        if not self.files:
            Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
            BuildContext.compile(self)
            return
        targets=None
        if self.targets and self.targets!='*':
            targets=self.targets.split(',')
        for g in self.groups:
            for tg in g:
                if targets and tg.name not in targets:
                    continue
                try:
                    f=tg.post
                except AttributeError:
                    pass
                else:
                    f()
            # once the group is posted, run every matching task in it
            for pat in self.files.split(','):
                matcher=self.get_matcher(pat)
                for tg in g:
                    if isinstance(tg,Task.TaskBase):
                        lst=[tg]
                    else:
                        lst=tg.tasks
                    for tsk in lst:
                        do_exec=False
                        for node in getattr(tsk,'inputs',[]):
                            if matcher(node,output=False):
                                do_exec=True
                                break
                        for node in getattr(tsk,'outputs',[]):
                            if matcher(node,output=True):
                                do_exec=True
                                break
                        if do_exec:
                            ret=tsk.run()
                            Logs.info('%s -> exit %r'%(str(tsk),ret))

    def get_matcher(self,pat):
        """Return a predicate ``match(node, output)`` for pattern *pat*.

        An ``in:``/``out:`` prefix restricts matching to task inputs or
        outputs. The remainder is either an exact node path or a regular
        expression applied to ``node.abspath()``.
        """
        # direction filters
        inn=True
        out=True
        if pat.startswith('in:'):
            out=False
            # BUGFIX: was pat.replace('in:','') which removed *every*
            # occurrence (corrupting e.g. 'in:plugin.c'); strip the prefix only
            pat=pat[3:]
        elif pat.startswith('out:'):
            inn=False
            pat=pat[4:]
        anode=self.root.find_node(pat)
        pattern=None
        if not anode:
            # not an existing node: treat the pattern as a regex, anchored
            # at both ends unless the user anchored it already
            if not pat.startswith('^'):
                pat='^.+?%s'%pat
            if not pat.endswith('$'):
                pat='%s$'%pat
            pattern=re.compile(pat)
        def match(node,output):
            if output==True and not out:
                return False
            if output==False and not inn:
                return False
            if anode:
                return anode==node
            else:
                return pattern.match(node.abspath())
        return match
# Wrap (un)pickling of the build state with Utils.nogc -- presumably it
# disables the garbage collector around the call for speed on large
# projects (see waflib.Utils) -- TODO confirm.
BuildContext.store=Utils.nogc(BuildContext.store)
BuildContext.restore=Utils.nogc(BuildContext.restore)
| gpl-2.0 |
googleads/google-ads-python | google/ads/googleads/v8/errors/types/policy_violation_error.py | 1 | 1156 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Module descriptor used by the proto-plus runtime to register the
# messages declared in this file under the v8 errors package.
__protobuf__ = proto.module(
    package="google.ads.googleads.v8.errors",
    marshal="google.ads.googleads.v8",
    manifest={"PolicyViolationErrorEnum",},
)
class PolicyViolationErrorEnum(proto.Message):
    r"""Container for enum describing possible policy violation
    errors.
    """

    # NOTE(review): this file looks protoc-plus generated -- prefer
    # regenerating from the .proto source over hand-editing.
    class PolicyViolationError(proto.Enum):
        r"""Enum describing possible policy violation errors."""
        UNSPECIFIED = 0  # default/unset value
        UNKNOWN = 1  # value not recognized by this client version
        POLICY_ERROR = 2  # a policy violation occurred
# Re-export exactly the names declared in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
kapiziak/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/make/gyptest-noload.py | 362 | 2023 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests the use of the NO_LOAD flag which makes loading sub .mk files
optional.
"""
# Python 2.5 needs this for the with statement.
from __future__ import with_statement
import os
import TestGyp
# Generate Makefiles for the 'noload' fixture, then relocate the tree so no
# absolute paths from the gyp run leak into the build.
test = TestGyp.TestGyp(formats=['make'])

test.run_gyp('all.gyp', chdir='noload')

test.relocate('noload', 'relocate/noload')

# Baseline: full build without NO_LOAD.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload')
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

# Just sanity test that NO_LOAD=lib doesn't break anything.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')
# NO_LOAD with a name that matches nothing must also be harmless.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=z'])
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

# Make sure we can rebuild without reloading the sub .mk file.
with open('relocate/noload/main.c', 'a') as src_file:
  src_file.write("\n")
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

# Change shared.c, but verify that it doesn't get rebuilt if we don't load it:
# the executable must still print the *old* string.
with open('relocate/noload/lib/shared.c', 'w') as shared_file:
  shared_file.write(
      '#include "shared.h"\n'
      'const char kSharedStr[] = "modified";\n'
  )
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

test.pass_test()
| gpl-3.0 |
pyramania/scipy | scipy/io/tests/test_fortran.py | 95 | 2591 | ''' Tests for fortran sequential files '''
import tempfile
import shutil
from os import path
from glob import iglob
import re
from numpy.testing import assert_equal, assert_allclose, run_module_suite
import numpy as np
from scipy.io import FortranFile
DATA_PATH = path.join(path.dirname(__file__), 'data')
def test_fortranfiles_read():
    """Read each ``fortran-<dtype>-<i>x<j>x<k>.dat`` fixture and check it
    holds the integers 0..i*j*k-1 in Fortran (column-major) order."""
    for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
        # raw string: '\d' is an invalid escape in a plain literal, and the
        # trailing dot must be escaped to match only '.dat'
        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+)\.dat', filename, re.I)
        if not m:
            raise RuntimeError("Couldn't match %s filename to regex" % filename)

        dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))

        # context manager guarantees the file is closed even when an
        # assertion below fails (FortranFile supports 'with')
        with FortranFile(filename, 'r', '<u4') as f:
            data = f.read_record(dtype=m.group(1).replace('s', '<')).reshape(dims)

        counter = 0
        for k in range(dims[2]):
            for j in range(dims[1]):
                for i in range(dims[0]):
                    assert_equal(counter, data[i, j, k])
                    counter += 1
def test_fortranfiles_mixed_record():
    # One record containing fields of mixed dtypes, read back as a
    # structured array: int32, float32, int64 and a pair of float64s.
    filename = path.join(DATA_PATH, "fortran-mixed.dat")
    with FortranFile(filename, 'r', '<u4') as f:
        record = f.read_record('<i4,<f4,<i8,(2)<f8')

    assert_equal(record['f0'][0], 1)
    assert_allclose(record['f1'][0], 2.3)
    assert_equal(record['f2'][0], 4)
    assert_allclose(record['f3'][0], [5.6, 7.8])
def test_fortranfiles_write():
    """Round-trip: write the counting pattern with FortranFile and compare
    the raw bytes against the reference fixture of the same name."""
    for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
        # raw string: '\d' is an invalid escape in a plain literal, and the
        # trailing dot must be escaped to match only '.dat'
        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+)\.dat', filename, re.I)
        if not m:
            raise RuntimeError("Couldn't match %s filename to regex" % filename)
        dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))

        # build the same column-major counting pattern the fixtures contain
        counter = 0
        data = np.zeros(dims, dtype=m.group(1).replace('s', '<'))
        for k in range(dims[2]):
            for j in range(dims[1]):
                for i in range(dims[0]):
                    data[i, j, k] = counter
                    counter += 1

        tmpdir = tempfile.mkdtemp()
        try:
            testFile = path.join(tmpdir, path.basename(filename))
            # 'with' ensures the record is flushed/closed before comparing
            with FortranFile(testFile, 'w', '<u4') as f:
                f.write_record(data)

            # byte-for-byte comparison against the reference file; the
            # context managers close the handles even on assertion failure
            with open(filename, 'rb') as originalfile, \
                    open(testFile, 'rb') as newfile:
                assert_equal(originalfile.read(), newfile.read(),
                             err_msg=filename)
        finally:
            shutil.rmtree(tmpdir)
# Allow running this test module directly: python test_fortran.py
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
alu042/edx-platform | common/lib/xmodule/xmodule/seq_module.py | 12 | 17301 | """
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import collections
import json
import logging
from pkg_resources import resource_string
import warnings
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Integer, Scope, Boolean, String
from xblock.fragment import Fragment
import newrelic.agent
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import XModule, STUDENT_VIEW
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
# Icon precedence used by SequenceModule.get_icon_class: the last matching
# entry wins, so 'problem' outranks 'video' when both are present.
class_priority = ['video', 'problem']

# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class SequenceFields(object):
    """XBlock fields shared by SequenceModule and SequenceDescriptor."""
    has_children = True

    # NOTE: Position is 1-indexed. This is silly, but there are now student
    # positions saved on prod, so it's not easy to fix.
    position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)

    # Optional due date applied to the problems inside this sequence.
    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the date by which problems are due."),
        scope=Scope.settings,
    )

    # Entrance Exam flag -- see cms/contentstore/views/entrance_exam.py for usage
    is_entrance_exam = Boolean(
        display_name=_("Is Entrance Exam"),
        help=_(
            "Tag this course module as an Entrance Exam. "
            "Note, you must enable Entrance Exams for this course setting to take effect."
        ),
        default=False,
        scope=Scope.settings,
    )
class ProctoringFields(object):
    """
    Fields that are specific to Proctored or Timed Exams
    """
    is_time_limited = Boolean(
        display_name=_("Is Time Limited"),
        help=_(
            "This setting indicates whether students have a limited time"
            " to view or interact with this courseware component."
        ),
        default=False,
        scope=Scope.settings,
    )

    # None means "no explicit limit configured" (treated as 0 downstream).
    default_time_limit_minutes = Integer(
        display_name=_("Time Limit in Minutes"),
        help=_(
            "The number of minutes available to students for viewing or interacting with this courseware component."
        ),
        default=None,
        scope=Scope.settings,
    )

    is_proctored_enabled = Boolean(
        display_name=_("Is Proctoring Enabled"),
        help=_(
            "This setting indicates whether this exam is a proctored exam."
        ),
        default=False,
        scope=Scope.settings,
    )

    exam_review_rules = String(
        display_name=_("Software Secure Review Rules"),
        help=_(
            "This setting indicates what rules the proctoring team should follow when viewing the videos."
        ),
        default='',
        scope=Scope.settings,
    )

    is_practice_exam = Boolean(
        display_name=_("Is Practice Exam"),
        help=_(
            "This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
        ),
        default=False,
        scope=Scope.settings,
    )

    # Read/write alias so callers can use the clearer name while the stored
    # field keeps its original (persisted) name.
    @property
    def is_proctored_exam(self):
        """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """
        return self.is_proctored_enabled

    @is_proctored_exam.setter
    def is_proctored_exam(self, value):
        """ Alias the is_proctored_enabled field to the more legible is_proctored_exam """
        self.is_proctored_enabled = value
@XBlock.wants('proctoring')
@XBlock.wants('credit')
@XBlock.needs("user")
@XBlock.needs("bookmarks")
class SequenceModule(SequenceFields, ProctoringFields, XModule):
    """
    Layout module which lays out content in a temporal sequence
    """
    js = {
        'coffee': [resource_string(__name__, 'js/src/sequence/display.coffee')],
        'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')],
    }
    css = {
        'scss': [resource_string(__name__, 'css/sequence/display.scss')],
    }
    js_module_name = "Sequence"

    def __init__(self, *args, **kwargs):
        super(SequenceModule, self).__init__(*args, **kwargs)

        # If position is specified in system, then use that instead.
        position = getattr(self.system, 'position', None)
        if position is not None:
            try:
                self.position = int(self.system.position)
            except (ValueError, TypeError):
                # Check for https://openedx.atlassian.net/browse/LMS-6496
                warnings.warn(
                    "Sequential position cannot be converted to an integer: {pos!r}".format(
                        pos=self.system.position,
                    ),
                    RuntimeWarning,
                )

    def get_progress(self):
        ''' Return the total progress, adding total done and total available.
        (assumes that each submodule uses the same "units" for progress.)
        '''
        # TODO: Cache progress or children array?
        children = self.get_children()
        progresses = [child.get_progress() for child in children]
        # 'reduce' here is the Python 2 builtin
        progress = reduce(Progress.add_counts, progresses, None)
        return progress

    def handle_ajax(self, dispatch, data):  # TODO: bounds checking
        ''' get = request.POST instance '''
        if dispatch == 'goto_position':
            # set position to default value if either 'position' argument not
            # found in request or it is a non-positive integer
            position = data.get('position', u'1')
            if position.isdigit() and int(position) > 0:
                self.position = int(position)
            else:
                self.position = 1
            return json.dumps({'success': True})
        raise NotFoundError('Unexpected dispatch type')

    def student_view(self, context):
        """Render this sequence and its children as a web Fragment."""
        # If we're rendering this sequence, but no position is set yet,
        # default the position to the first element
        if self.position is None:
            self.position = 1

        ## Returns a set of all types of all sub-children
        contents = []

        fragment = Fragment()
        context = context or {}
        bookmarks_service = self.runtime.service(self, "bookmarks")
        context["username"] = self.runtime.service(self, "user").get_current_user().opt_attrs['edx-platform.username']
        parent_module = self.get_parent()
        display_names = [
            parent_module.display_name_with_default,
            self.display_name_with_default
        ]

        # We do this up here because proctored exam functionality could bypass
        # rendering after this section.
        self._capture_basic_metrics()

        # Is this sequential part of a timed or proctored exam?
        if self.is_time_limited:
            view_html = self._time_limited_student_view(context)

            # Do we have an alternate rendering
            # from the edx_proctoring subsystem?
            if view_html:
                fragment.add_content(view_html)
                return fragment

        display_items = self.get_display_items()
        for child in display_items:
            is_bookmarked = bookmarks_service.is_bookmarked(usage_key=child.scope_ids.usage_id)
            context["bookmarked"] = is_bookmarked

            progress = child.get_progress()
            rendered_child = child.render(STUDENT_VIEW, context)
            fragment.add_frag_resources(rendered_child)

            # `titles` is a list of titles to inject into the sequential tooltip display.
            # We omit any blank titles to avoid blank lines in the tooltip display.
            titles = [title.strip() for title in child.get_content_titles() if title.strip()]
            childinfo = {
                'content': rendered_child.content,
                'title': "\n".join(titles),
                'page_title': titles[0] if titles else '',
                'progress_status': Progress.to_js_status_str(progress),
                'progress_detail': Progress.to_js_detail_str(progress),
                'type': child.get_icon_class(),
                'id': child.scope_ids.usage_id.to_deprecated_string(),
                'bookmarked': is_bookmarked,
                'path': " > ".join(display_names + [child.display_name_with_default]),
            }
            if childinfo['title'] == '':
                childinfo['title'] = child.display_name_with_default_escaped
            contents.append(childinfo)

        params = {
            'items': contents,
            'element_id': self.location.html_id(),
            'item_id': self.location.to_deprecated_string(),
            'position': self.position,
            'tag': self.location.category,
            'ajax_url': self.system.ajax_url,
        }

        fragment.add_content(self.system.render_template("seq_module.html", params))

        self._capture_full_seq_item_metrics(display_items)
        self._capture_current_unit_metrics(display_items)

        # Get all descendant XBlock types and counts
        return fragment

    def _locations_in_subtree(self, node):
        """
        The usage keys for all descendants of an XBlock/XModule as a flat list.

        Includes the location of the node passed in.
        """
        stack = [node]
        locations = []

        # iterative depth-first walk (avoids recursion limits on deep trees)
        while stack:
            curr = stack.pop()
            locations.append(curr.location)
            if curr.has_children:
                stack.extend(curr.get_children())

        return locations

    def _capture_basic_metrics(self):
        """
        Capture basic information about this sequence in New Relic.
        """
        newrelic.agent.add_custom_parameter('seq.block_id', unicode(self.location))
        newrelic.agent.add_custom_parameter('seq.display_name', self.display_name or '')
        newrelic.agent.add_custom_parameter('seq.position', self.position)
        newrelic.agent.add_custom_parameter('seq.is_time_limited', self.is_time_limited)

    def _capture_full_seq_item_metrics(self, display_items):
        """
        Capture information about the number and types of XBlock content in
        the sequence as a whole. We send this information to New Relic so that
        we can do better performance analysis of courseware.
        """
        # Basic count of the number of Units (a.k.a. VerticalBlocks) we have in
        # this learning sequence
        newrelic.agent.add_custom_parameter('seq.num_units', len(display_items))

        # Count of all modules (leaf nodes) in this sequence (e.g. videos,
        # problems, etc.) The units (verticals) themselves are not counted.
        all_item_keys = self._locations_in_subtree(self)
        newrelic.agent.add_custom_parameter('seq.num_items', len(all_item_keys))

        # Count of all modules by block_type (e.g. "video": 2, "discussion": 4)
        block_counts = collections.Counter(usage_key.block_type for usage_key in all_item_keys)
        for block_type, count in block_counts.items():
            newrelic.agent.add_custom_parameter('seq.block_counts.{}'.format(block_type), count)

    def _capture_current_unit_metrics(self, display_items):
        """
        Capture information about the current selected Unit within the Sequence.
        """
        # Positions are stored with indexing starting at 1. If we get into a
        # weird state where the saved position is out of bounds (e.g. the
        # content was changed), avoid going into any details about this unit.
        if 1 <= self.position <= len(display_items):
            # Basic info about the Unit...
            current = display_items[self.position - 1]
            newrelic.agent.add_custom_parameter('seq.current.block_id', unicode(current.location))
            newrelic.agent.add_custom_parameter('seq.current.display_name', current.display_name or '')

            # Examining all items inside the Unit (or split_test, conditional, etc.)
            child_locs = self._locations_in_subtree(current)
            newrelic.agent.add_custom_parameter('seq.current.num_items', len(child_locs))
            curr_block_counts = collections.Counter(usage_key.block_type for usage_key in child_locs)
            for block_type, count in curr_block_counts.items():
                newrelic.agent.add_custom_parameter('seq.current.block_counts.{}'.format(block_type), count)

    def _time_limited_student_view(self, context):
        """
        Delegated rendering of a student view when in a time
        limited view. This ultimately calls down into edx_proctoring
        pip installed djangoapp
        """
        # None = no overridden view rendering
        view_html = None

        proctoring_service = self.runtime.service(self, 'proctoring')
        credit_service = self.runtime.service(self, 'credit')

        # Is this sequence designated as a Timed Examination, which includes
        # Proctored Exams
        feature_enabled = (
            proctoring_service and
            credit_service and
            self.is_time_limited
        )
        if feature_enabled:
            user_id = self.runtime.user_id
            user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student'
            course_id = self.runtime.course_id
            content_id = self.location

            context = {
                'display_name': self.display_name,
                'default_time_limit_mins': (
                    self.default_time_limit_minutes if
                    self.default_time_limit_minutes else 0
                ),
                'is_practice_exam': self.is_practice_exam,
                'due_date': self.due
            }

            # inject the user's credit requirements and fulfillments
            if credit_service:
                credit_state = credit_service.get_credit_state(user_id, course_id)
                if credit_state:
                    context.update({
                        'credit_state': credit_state
                    })

            # See if the edx-proctoring subsystem wants to present
            # a special view to the student rather
            # than the actual sequence content
            #
            # This will return None if there is no
            # overridden view to display given the
            # current state of the user
            view_html = proctoring_service.get_student_view(
                user_id=user_id,
                course_id=course_id,
                content_id=content_id,
                context=context,
                user_role=user_role_in_course
            )

        return view_html

    def get_icon_class(self):
        """Return the icon class: the highest-priority class (per
        `class_priority`, later entries win) among the children, else 'other'."""
        child_classes = set(child.get_icon_class()
                            for child in self.get_children())
        new_class = 'other'
        for c in class_priority:
            if c in child_classes:
                new_class = c
        return new_class
class SequenceDescriptor(SequenceFields, ProctoringFields, MakoModuleDescriptor, XmlDescriptor):
    """
    A Sequences Descriptor object
    """
    mako_template = 'widgets/sequence-edit.html'
    module_class = SequenceModule

    show_in_read_only_mode = True

    js = {
        'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')],
    }
    js_module_name = "SequenceDescriptor"

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """Parse the child usage ids from XML, logging and skipping any
        child that fails to load instead of aborting the whole sequence."""
        children = []
        for child in xml_object:
            try:
                child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
                children.append(child_block.scope_ids.usage_id)
            except Exception as e:
                log.exception("Unable to load child when parsing Sequence. Continuing...")
                if system.error_tracker is not None:
                    system.error_tracker(u"ERROR: {0}".format(e))
                continue
        return {}, children

    def definition_to_xml(self, resource_fs):
        """Serialize this sequence and its children to a <sequential> element."""
        xml_object = etree.Element('sequential')
        for child in self.get_children():
            self.runtime.add_block_as_child_node(child, xml_object)
        return xml_object

    @property
    def non_editable_metadata_fields(self):
        """
        `is_entrance_exam` should not be editable in the Studio settings editor.
        """
        non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields
        non_editable_fields.append(self.fields['is_entrance_exam'])
        return non_editable_fields

    def index_dictionary(self):
        """
        Return dictionary prepared with module content and type for indexing.
        """
        # return key/value fields in a Python dict object
        # values may be numeric / string or dict
        # default implementation is an empty dict
        xblock_body = super(SequenceDescriptor, self).index_dictionary()
        html_body = {
            "display_name": self.display_name,
        }
        if "content" in xblock_body:
            xblock_body["content"].update(html_body)
        else:
            xblock_body["content"] = html_body
        xblock_body["content_type"] = "Sequence"
        return xblock_body
| agpl-3.0 |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/astroid/tests/unittest_transforms.py | 1 | 8215 | # Copyright (c) 2015-2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/LICENSE
import contextlib
import time
import unittest
from astroid import builder, nodes, parse, transforms
@contextlib.contextmanager
def add_transform(manager, node, transform, predicate=None):
    """Register *transform* for *node* on *manager* for the duration of the
    with-block, guaranteeing unregistration even if the body raises."""
    manager.register_transform(node, transform, predicate)
    try:
        yield
    finally:
        manager.unregister_transform(node, transform, predicate)
class TestTransforms(unittest.TestCase):
def setUp(self):
self.transformer = transforms.TransformVisitor()
def parse_transform(self, code):
module = parse(code, apply_transforms=False)
return self.transformer.visit(module)
def test_function_inlining_transform(self):
def transform_call(node):
# Let's do some function inlining
inferred = next(node.infer())
return inferred
self.transformer.register_transform(nodes.Call, transform_call)
module = self.parse_transform(
"""
def test(): return 42
test() #@
"""
)
self.assertIsInstance(module.body[1], nodes.Expr)
self.assertIsInstance(module.body[1].value, nodes.Const)
self.assertEqual(module.body[1].value.value, 42)
def test_recursive_transforms_into_astroid_fields(self):
# Test that the transformer walks properly the tree
# by going recursively into the _astroid_fields per each node.
def transform_compare(node):
# Let's check the values of the ops
_, right = node.ops[0]
# Assume they are Consts and they were transformed before
# us.
return nodes.const_factory(node.left.value < right.value)
def transform_name(node):
# Should be Consts
return next(node.infer())
self.transformer.register_transform(nodes.Compare, transform_compare)
self.transformer.register_transform(nodes.Name, transform_name)
module = self.parse_transform(
"""
a = 42
b = 24
a < b
"""
)
self.assertIsInstance(module.body[2], nodes.Expr)
self.assertIsInstance(module.body[2].value, nodes.Const)
self.assertFalse(module.body[2].value.value)
def test_transform_patches_locals(self):
def transform_function(node):
assign = nodes.Assign()
name = nodes.AssignName()
name.name = "value"
assign.targets = [name]
assign.value = nodes.const_factory(42)
node.body.append(assign)
self.transformer.register_transform(nodes.FunctionDef, transform_function)
module = self.parse_transform(
"""
def test():
pass
"""
)
func = module.body[0]
self.assertEqual(len(func.body), 2)
self.assertIsInstance(func.body[1], nodes.Assign)
self.assertEqual(func.body[1].as_string(), "value = 42")
def test_predicates(self):
def transform_call(node):
inferred = next(node.infer())
return inferred
def should_inline(node):
return node.func.name.startswith("inlineme")
self.transformer.register_transform(nodes.Call, transform_call, should_inline)
module = self.parse_transform(
"""
def inlineme_1():
return 24
def dont_inline_me():
return 42
def inlineme_2():
return 2
inlineme_1()
dont_inline_me()
inlineme_2()
"""
)
values = module.body[-3:]
self.assertIsInstance(values[0], nodes.Expr)
self.assertIsInstance(values[0].value, nodes.Const)
self.assertEqual(values[0].value.value, 24)
self.assertIsInstance(values[1], nodes.Expr)
self.assertIsInstance(values[1].value, nodes.Call)
self.assertIsInstance(values[2], nodes.Expr)
self.assertIsInstance(values[2].value, nodes.Const)
self.assertEqual(values[2].value.value, 2)
def test_transforms_are_separated(self):
# Test that the transforming is done at a separate
# step, which means that we are not doing inference
# on a partially constructed tree anymore, which was the
# source of crashes in the past when certain inference rules
# were used in a transform.
def transform_function(node):
if node.decorators:
for decorator in node.decorators.nodes:
inferred = next(decorator.infer())
if inferred.qname() == "abc.abstractmethod":
return next(node.infer_call_result())
return None
manager = builder.MANAGER
with add_transform(manager, nodes.FunctionDef, transform_function):
module = builder.parse(
"""
import abc
from abc import abstractmethod
class A(object):
@abc.abstractmethod
def ala(self):
return 24
@abstractmethod
def bala(self):
return 42
"""
)
cls = module["A"]
ala = cls.body[0]
bala = cls.body[1]
self.assertIsInstance(ala, nodes.Const)
self.assertEqual(ala.value, 24)
self.assertIsInstance(bala, nodes.Const)
self.assertEqual(bala.value, 42)
    def test_transforms_are_called_for_builtin_modules(self):
        # Test that transforms are called for builtin modules.
        def transform_function(node):
            # Rewrite the function's signature to a single positional
            # argument named "value".
            name = nodes.AssignName()
            name.name = "value"
            node.args.args = [name]
            return node

        manager = builder.MANAGER
        # Restrict the transform to functions defined in the builtin
        # "time" module.
        predicate = lambda node: node.root().name == "time"
        with add_transform(manager, nodes.FunctionDef, transform_function, predicate):
            builder_instance = builder.AstroidBuilder()
            module = builder_instance.module_build(time)

        # time.asctime now appears to take exactly one argument, "value".
        asctime = module["asctime"]
        self.assertEqual(len(asctime.args.args), 1)
        self.assertIsInstance(asctime.args.args[0], nodes.AssignName)
        self.assertEqual(asctime.args.args[0].name, "value")
    def test_builder_apply_transforms(self):
        """A builder created with apply_transforms=False skips transforms."""
        def transform_function(node):
            # Would replace any FunctionDef with the constant 42.
            return nodes.const_factory(42)

        manager = builder.MANAGER
        with add_transform(manager, nodes.FunctionDef, transform_function):
            # Even with the transform registered on the manager, this
            # builder must not run it.
            astroid_builder = builder.AstroidBuilder(apply_transforms=False)
            module = astroid_builder.string_build("""def test(): pass""")

        # The transform wasn't applied.
        self.assertIsInstance(module.body[0], nodes.FunctionDef)
    def test_transform_crashes_on_is_subtype_of(self):
        # Test that we don't crash when having is_subtype_of
        # in a transform, as per issue #188. This happened
        # before, when the transforms weren't in their own step.
        def transform_class(cls):
            # The call itself is the point of the test: it must not raise.
            # Both branches return the class unchanged.
            if cls.is_subtype_of("django.db.models.base.Model"):
                return cls
            return cls

        self.transformer.register_transform(nodes.ClassDef, transform_class)
        # Parsing must complete without crashing; no assertions needed.
        self.parse_transform(
            """
        # Change environ to automatically call putenv() if it exists
        import os
        putenv = os.putenv
        try:
            # This will fail if there's no putenv
            putenv
        except NameError:
            pass
        else:
            import UserDict
        """
        )
if __name__ == "__main__":
unittest.main()
| mit |
nvoron23/avos | openstack_dashboard/dashboards/project/databases/views.py | 3 | 8438 | # Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing database instances.
"""
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms as horizon_forms
from horizon import tables as horizon_tables
from horizon import tabs as horizon_tabs
from horizon.utils import memoized
from horizon import workflows as horizon_workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.databases import forms
from openstack_dashboard.dashboards.project.databases import tables
from openstack_dashboard.dashboards.project.databases import tabs
from openstack_dashboard.dashboards.project.databases import workflows
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
class IndexView(horizon_tables.DataTableView):
    """Paginated table listing of the project's Trove database instances."""
    table_class = tables.InstancesTable
    template_name = 'project/databases/index.html'

    def has_more_data(self, table):
        # Set by get_data(); tells the table whether to show a next-page link.
        return self._more

    @memoized.memoized_method
    def get_flavors(self):
        """Return a mapping of flavor id (as unicode) -> flavor object.

        Memoized so the flavor list is fetched at most once per request;
        failures degrade to an empty mapping plus a user-visible error.
        """
        try:
            flavors = api.trove.flavor_list(self.request)
        except Exception:
            flavors = []
            msg = _('Unable to retrieve database size information.')
            exceptions.handle(self.request, msg)
        return SortedDict((unicode(flavor.id), flavor) for flavor in flavors)

    def _extra_data(self, instance):
        # Annotate the instance with its full flavor object (if the id is
        # known) and the host string used by the table columns.
        flavor = self.get_flavors().get(instance.flavor["id"])
        if flavor is not None:
            instance.full_flavor = flavor
        instance.host = tables.get_host(instance)
        return instance

    def get_data(self):
        """Fetch one page of database instances, annotated for display."""
        marker = self.request.GET.get(
            tables.InstancesTable._meta.pagination_param)
        # Gather our instances
        try:
            instances = api.trove.instance_list(self.request, marker=marker)
            self._more = instances.next or False
        except Exception:
            self._more = False
            instances = []
            msg = _('Unable to retrieve database instances.')
            exceptions.handle(self.request, msg)
        # map() is used for its side effect of annotating each instance.
        map(self._extra_data, instances)
        return instances
class LaunchInstanceView(horizon_workflows.WorkflowView):
    """Workflow view for launching a new Trove database instance."""
    workflow_class = workflows.LaunchInstance
    template_name = "project/databases/launch.html"

    def get_initial(self):
        # Seed the workflow with the requesting user's project and user ids.
        initial = super(LaunchInstanceView, self).get_initial()
        initial['project_id'] = self.request.user.project_id
        initial['user_id'] = self.request.user.id
        return initial
class DetailView(horizon_tabs.TabbedTableView):
    """Tabbed detail page for a single database instance."""
    tab_group_class = tabs.InstanceDetailTabs
    template_name = 'project/databases/detail.html'

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        instance = self.get_data()
        table = tables.InstancesTable(self.request)
        context["instance"] = instance
        context["url"] = self.get_redirect_url()
        # Row actions for this instance are rendered in the page header.
        context["actions"] = table.render_row_actions(instance)
        context["page_title"] = _("Instance Details: "
                                  "%(instance_name)s") % {'instance_name':
                                                          instance.name}
        return context

    @memoized.memoized_method
    def get_data(self):
        """Fetch the instance (memoized); redirect to the index on failure."""
        try:
            LOG.info("Obtaining instance for detailed view ")
            instance_id = self.kwargs['instance_id']
            instance = api.trove.instance_get(self.request, instance_id)
            instance.host = tables.get_host(instance)
        except Exception:
            # exceptions.handle() with a redirect raises, aborting here.
            msg = _('Unable to retrieve details '
                    'for database instance: %s') % instance_id
            exceptions.handle(self.request, msg,
                              redirect=self.get_redirect_url())
        try:
            instance.full_flavor = api.trove.flavor_get(
                self.request, instance.flavor["id"])
        except Exception:
            # Missing flavor details are non-fatal; log and continue.
            LOG.error('Unable to retrieve flavor details'
                      ' for database instance: %s' % instance_id)
        return instance

    def get_tabs(self, request, *args, **kwargs):
        instance = self.get_data()
        return self.tab_group_class(request, instance=instance, **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse('horizon:project:databases:index')
class ResizeVolumeView(horizon_forms.ModalFormView):
    """Modal form for growing an instance's attached volume."""
    form_class = forms.ResizeVolumeForm
    template_name = 'project/databases/resize_volume.html'
    success_url = reverse_lazy('horizon:project:databases:index')

    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Fetch the instance (memoized); redirect to the index on failure."""
        instance_id = self.kwargs['instance_id']
        try:
            return api.trove.instance_get(self.request, instance_id)
        except Exception:
            msg = _('Unable to retrieve instance details.')
            redirect = reverse('horizon:project:databases:index')
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_context_data(self, **kwargs):
        context = super(ResizeVolumeView, self).get_context_data(**kwargs)
        context['instance_id'] = self.kwargs['instance_id']
        return context

    def get_initial(self):
        # Pre-fill the form with the instance's current volume size.
        instance = self.get_object()
        return {'instance_id': self.kwargs['instance_id'],
                'orig_size': instance.volume.get('size', 0)}
class ResizeInstanceView(horizon_forms.ModalFormView):
    """Modal form for changing an instance's flavor (resize)."""
    form_class = forms.ResizeInstanceForm
    template_name = 'project/databases/resize_instance.html'
    success_url = reverse_lazy('horizon:project:databases:index')

    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Fetch the instance with its flavor name resolved (memoized)."""
        instance_id = self.kwargs['instance_id']
        try:
            instance = api.trove.instance_get(self.request, instance_id)
            flavor_id = instance.flavor['id']
            # get_flavors() yields (id, name) pairs; index them by str(id).
            flavors = {}
            for i, j in self.get_flavors():
                flavors[str(i)] = j
            if flavor_id in flavors:
                instance.flavor_name = flavors[flavor_id]
            else:
                # Flavor not in the sorted list: look it up individually.
                flavor = api.trove.flavor_get(self.request, flavor_id)
                instance.flavor_name = flavor.name
            return instance
        except Exception:
            redirect = reverse('horizon:project:databases:index')
            msg = _('Unable to retrieve instance details.')
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_context_data(self, **kwargs):
        context = super(ResizeInstanceView, self).get_context_data(**kwargs)
        context['instance_id'] = self.kwargs['instance_id']
        return context

    @memoized.memoized_method
    def get_flavors(self, *args, **kwargs):
        """Return the flavor list sorted for display (memoized)."""
        try:
            flavors = api.trove.flavor_list(self.request)
            return instance_utils.sort_flavor_list(self.request, flavors)
        except Exception:
            redirect = reverse("horizon:project:databases:index")
            exceptions.handle(self.request,
                              _('Unable to retrieve flavors.'),
                              redirect=redirect)

    def get_initial(self):
        initial = super(ResizeInstanceView, self).get_initial()
        obj = self.get_object()
        if obj:
            initial.update({'instance_id': self.kwargs['instance_id'],
                            'old_flavor_id': obj.flavor['id'],
                            'old_flavor_name': getattr(obj,
                                                       'flavor_name', ''),
                            'flavors': self.get_flavors()})
        return initial
| apache-2.0 |
wjwwood/googlemock | scripts/fuse_gmock_files.py | 729 | 8606 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/gtest
sub-directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to googlemock@googlegroups.com. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into gtest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, 'gtest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
  """Returns the root directory of the Google Test copy bundled with gmock.

  Args:
    gmock_root: str, path of the Google Mock root directory.

  Returns:
    str, the 'gtest' sub-directory of gmock_root.
  """
  gtest_dir = os.path.join(gmock_root, 'gtest')
  return gtest_dir
def ValidateGMockRootDir(gmock_root):
  """Makes sure gmock_root points to a valid gmock root directory.

  The function aborts the program on failure.
  """
  # Google Test must be present as a sub-directory, and both seed files
  # that the fusing starts from must exist.
  gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
  gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
  gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # All three output files must be writable in output_dir.
  gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
  gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
  gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock/gmock.h in output_dir.

  Recursively inlines all '#include "gmock/..."' headers starting from
  GMOCK_H_SEED; gtest includes are collapsed into one include of the
  fused gtest/gtest.h.
  """
  # open() works on both Python 2 and 3, unlike the removed file() builtin.
  output_file = open(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
  # The built-in set replaces the long-deprecated sets.Set.
  processed_files = set()  # Holds all gmock headers we've processed.

  def ProcessFile(gmock_header_path):
    """Processes the given gmock header file."""
    # We don't process the same header twice.
    if gmock_header_path in processed_files:
      return
    processed_files.add(gmock_header_path)
    # Reads each line in the given gmock header.
    for line in open(os.path.join(gmock_root, gmock_header_path), 'r'):
      m = INCLUDE_GMOCK_FILE_REGEX.match(line)
      if m:
        # It's '#include "gmock/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/foo.h"'. We translate it to
          # "gtest/gtest.h", regardless of what foo is, since all
          # gtest headers are fused into gtest/gtest.h.
          # There is no need to #include gtest.h twice.
          if not gtest.GTEST_H_SEED in processed_files:
            processed_files.add(gtest.GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
        else:
          # Otherwise we copy the line unchanged to the output file.
          output_file.write(line)

  ProcessFile(GMOCK_H_SEED)
  output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
  """Scans folder gmock_root to fuse gmock-all.cc into output_file.

  Recursively inlines '#include "src/..."' files starting from
  GMOCK_ALL_CC_SEED; gmock headers collapse to one include of the fused
  gmock.h, and gtest includes are dropped (gtest-all.cc supplies them).
  """
  # The built-in set replaces the long-deprecated sets.Set.
  processed_files = set()

  def ProcessFile(gmock_source_file):
    """Processes the given gmock source file."""
    # We don't process the same #included file twice.
    if gmock_source_file in processed_files:
      return
    processed_files.add(gmock_source_file)
    # Reads each line in the given gmock source file.
    # open() works on both Python 2 and 3, unlike the removed file() builtin.
    for line in open(os.path.join(gmock_root, gmock_source_file), 'r'):
      m = INCLUDE_GMOCK_FILE_REGEX.match(line)
      if m:
        # It's '#include "gmock/foo.h"'. We treat it as '#include
        # "gmock/gmock.h"', as all other gmock headers are being fused
        # into gmock.h and cannot be #included directly.
        # There is no need to #include "gmock/gmock.h" more than once.
        if not GMOCK_H_SEED in processed_files:
          processed_files.add(GMOCK_H_SEED)
          output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
      else:
        m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/..."'.
          # There is no need to #include gtest.h as it has been
          # #included by gtest-all.cc.
          pass
        else:
          m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            # Otherwise we copy the line unchanged to the output file.
            output_file.write(line)

  ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
  # open() works on both Python 2 and 3, unlike the removed file() builtin.
  output_file = open(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
  try:
    # First, fuse gtest-all.cc into gmock-gtest-all.cc.
    gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
    # Next, append fused gmock-all.cc to gmock-gtest-all.cc.
    FuseGMockAllCcToFile(gmock_root, output_file)
  finally:
    # Close even if fusing fails, so a partial file isn't left open.
    output_file.close()
def FuseGMock(gmock_root, output_dir):
  """Fuses gtest.h, gmock.h, and gmock-gtest-all.h.

  Validates both directories first (aborting on failure), then writes the
  three fused output files into output_dir.
  """
  ValidateGMockRootDir(gmock_root)
  ValidateOutputDir(output_dir)
  gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
  FuseGMockH(gmock_root, output_dir)
  FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
  """Command-line entry point: dispatch on the number of arguments."""
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gmock_files.py OUTPUT_DIR
    FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
    FuseGMock(sys.argv[1], sys.argv[2])
  else:
    # Wrong number of arguments: print the module usage text and fail.
    print __doc__
    sys.exit(1)
if __name__ == '__main__':
main()
| bsd-3-clause |
nnethercote/servo | tests/wpt/webgl/tests/closure-library/closure/bin/build/closurebuilder.py | 134 | 9626 | #!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Closure Library dependency calculation.
ClosureBuilder scans source files to build dependency info. From the
dependencies, the script can produce a manifest in dependency order,
a concatenated script, or compiled output from the Closure Compiler.
Paths to files can be expressed as individual arguments to the tool (intended
for use with find and xargs). As a convenience, --root can be used to specify
all JS files below a directory.
usage: %prog [options] [file1.js file2.js ...]
"""
__author__ = 'nnaze@google.com (Nathan Naze)'
import logging
import optparse
import os
import sys
import depstree
import jscompiler
import source
import treescan
def _GetOptionsParser():
  """Get the options parser.

  Returns:
    An optparse.OptionParser configured with all closurebuilder flags;
    the module docstring serves as the usage text.
  """
  parser = optparse.OptionParser(__doc__)
  parser.add_option('-i',
                    '--input',
                    dest='inputs',
                    action='append',
                    default=[],
                    help='One or more input files to calculate dependencies '
                    'for. The namespaces in this file will be combined with '
                    'those given with the -n flag to form the set of '
                    'namespaces to find dependencies for.')
  parser.add_option('-n',
                    '--namespace',
                    dest='namespaces',
                    action='append',
                    default=[],
                    help='One or more namespaces to calculate dependencies '
                    'for. These namespaces will be combined with those given '
                    'with the -i flag to form the set of namespaces to find '
                    'dependencies for. A Closure namespace is a '
                    'dot-delimited path expression declared with a call to '
                    'goog.provide() (e.g. "goog.array" or "foo.bar").')
  parser.add_option('--root',
                    dest='roots',
                    action='append',
                    default=[],
                    help='The paths that should be traversed to build the '
                    'dependencies.')
  parser.add_option('-o',
                    '--output_mode',
                    dest='output_mode',
                    type='choice',
                    action='store',
                    choices=['list', 'script', 'compiled'],
                    default='list',
                    help='The type of output to generate from this script. '
                    'Options are "list" for a list of filenames, "script" '
                    'for a single script containing the contents of all the '
                    'files, or "compiled" to produce compiled output with '
                    'the Closure Compiler. Default is "list".')
  parser.add_option('-c',
                    '--compiler_jar',
                    dest='compiler_jar',
                    action='store',
                    help='The location of the Closure compiler .jar file.')
  parser.add_option('-f',
                    '--compiler_flags',
                    dest='compiler_flags',
                    default=[],
                    action='append',
                    help='Additional flags to pass to the Closure compiler. '
                    'To pass multiple flags, --compiler_flags has to be '
                    'specified multiple times.')
  parser.add_option('-j',
                    '--jvm_flags',
                    dest='jvm_flags',
                    default=[],
                    action='append',
                    help='Additional flags to pass to the JVM compiler. '
                    'To pass multiple flags, --jvm_flags has to be '
                    'specified multiple times.')
  parser.add_option('--output_file',
                    dest='output_file',
                    action='store',
                    help=('If specified, write output to this path instead of '
                          'writing to standard output.'))
  return parser
def _GetInputByPath(path, sources):
"""Get the source identified by a path.
Args:
path: str, A path to a file that identifies a source.
sources: An iterable collection of source objects.
Returns:
The source from sources identified by path, if found. Converts to
real paths for comparison.
"""
for js_source in sources:
# Convert both to real paths for comparison.
if os.path.realpath(path) == os.path.realpath(js_source.GetPath()):
return js_source
def _GetClosureBaseFile(sources):
  """Given a set of sources, returns the one base.js file.

  Note that if zero or two or more base.js files are found, an error message
  will be written and the program will be exited.

  Args:
    sources: An iterable of _PathSource objects.

  Returns:
    The _PathSource representing the base Closure file.
  """
  base_files = []
  for js_source in sources:
    if _IsClosureBaseFile(js_source):
      base_files.append(js_source)

  if not base_files:
    logging.error('No Closure base.js file found.')
    sys.exit(1)
  if len(base_files) > 1:
    logging.error('More than one Closure base.js files found at these paths:')
    for base_file in base_files:
      logging.error(base_file.GetPath())
    sys.exit(1)
  return base_files[0]
def _IsClosureBaseFile(js_source):
"""Returns true if the given _PathSource is the Closure base.js source."""
return (os.path.basename(js_source.GetPath()) == 'base.js' and
js_source.provides == set(['goog']))
class _PathSource(source.Source):
  """Source file subclass that remembers its file path."""

  def __init__(self, path):
    """Initialize a source.

    Args:
      path: str, Path to a JavaScript file.  The source string will be read
        from this file.
    """
    # The parent class parses the file's contents; we only keep the path.
    super(_PathSource, self).__init__(source.GetFileContents(path))

    self._path = path

  def __str__(self):
    return 'PathSource %s' % self._path

  def GetPath(self):
    """Returns the path."""
    return self._path
def _WrapGoogModuleSource(src):
return ('goog.loadModule(function(exports) {{'
'"use strict";'
'{0}'
'\n' # terminate any trailing single line comment.
';return exports'
'}});\n').format(src)
def main():
  """Entry point: scan roots, order dependencies, emit list/script/compiled."""
  logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                      level=logging.INFO)
  options, args = _GetOptionsParser().parse_args()

  # Make our output pipe.
  if options.output_file:
    out = open(options.output_file, 'w')
  else:
    out = sys.stdout

  sources = set()

  logging.info('Scanning paths...')
  for path in options.roots:
    for js_path in treescan.ScanTreeForJsFiles(path):
      sources.add(_PathSource(js_path))

  # Add scripts specified on the command line.
  for js_path in args:
    sources.add(_PathSource(js_path))

  logging.info('%s sources scanned.', len(sources))

  # Though deps output doesn't need to query the tree, we still build it
  # to validate dependencies.
  logging.info('Building dependency tree..')
  tree = depstree.DepsTree(sources)

  # Union of namespaces provided by -i files and named by -n flags.
  input_namespaces = set()
  inputs = options.inputs or []
  for input_path in inputs:
    js_input = _GetInputByPath(input_path, sources)
    if not js_input:
      logging.error('No source matched input %s', input_path)
      sys.exit(1)
    input_namespaces.update(js_input.provides)
  input_namespaces.update(options.namespaces)

  if not input_namespaces:
    logging.error('No namespaces found. At least one namespace must be '
                  'specified with the --namespace or --input flags.')
    sys.exit(2)

  # The Closure Library base file must go first.
  base = _GetClosureBaseFile(sources)
  deps = [base] + tree.GetDependencies(input_namespaces)

  output_mode = options.output_mode
  if output_mode == 'list':
    out.writelines([js_source.GetPath() + '\n' for js_source in deps])
  elif output_mode == 'script':
    for js_source in deps:
      src = js_source.GetSource()
      # goog.module files must be wrapped before concatenation.
      if js_source.is_goog_module:
        src = _WrapGoogModuleSource(src)
      out.write(src + '\n')
  elif output_mode == 'compiled':
    logging.warning("""\
Closure Compiler now natively understands and orders Closure dependencies and
is prefererred over using this script for performing JavaScript compilation.

Please migrate your codebase.

See:
https://github.com/google/closure-compiler/wiki/Manage-Closure-Dependencies
""")

    # Make sure a .jar is specified.
    if not options.compiler_jar:
      logging.error('--compiler_jar flag must be specified if --output is '
                    '"compiled"')
      sys.exit(2)

    # Will throw an error if the compilation fails.
    compiled_source = jscompiler.Compile(
        options.compiler_jar,
        [js_source.GetPath() for js_source in deps],
        jvm_flags=options.jvm_flags,
        compiler_flags=options.compiler_flags)

    logging.info('JavaScript compilation succeeded.')
    out.write(compiled_source)

  else:
    logging.error('Invalid value for --output flag.')
    sys.exit(2)
if __name__ == '__main__':
main()
| mpl-2.0 |
apixandru/intellij-community | python/lib/Lib/site-packages/django/conf/locale/cs/formats.py | 232 | 1288 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax) for the Czech locale.
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. E Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)
# Czech number formatting: comma decimal separator, space-grouped thousands.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| apache-2.0 |
idovear/odoo | addons/account/wizard/account_vat.py | 378 | 2896 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_vat_declaration(osv.osv_memory):
    """Transient wizard that collects options and launches the VAT report."""
    _name = 'account.vat.declaration'
    _description = 'Account Vat Declaration'
    _inherit = "account.common.report"

    _columns = {
        # Whether tax amounts are computed from invoices or from payments.
        'based_on': fields.selection([('invoices', 'Invoices'),
                                      ('payments', 'Payments'),],
                                     'Based on', required=True),
        # Root tax code (parent_id is False) the report is based on.
        'chart_tax_id': fields.many2one('account.tax.code', 'Chart of Tax', help='Select Charts of Taxes', required=True, domain = [('parent_id','=', False)]),
        'display_detail': fields.boolean('Display Detail'),
    }

    def _get_tax(self, cr, uid, context=None):
        # Default chart of tax: the first root tax code belonging to the
        # current user's company, or False when none exists.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        taxes = self.pool.get('account.tax.code').search(cr, uid, [('parent_id', '=', False), ('company_id', '=', user.company_id.id)], limit=1)
        return taxes and taxes[0] or False

    _defaults = {
        'based_on': 'invoices',
        'chart_tax_id': _get_tax
    }

    def create_vat(self, cr, uid, ids, context=None):
        """Build the report datas dict and return the VAT report action."""
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        datas['model'] = 'account.tax.code'
        datas['form'] = self.read(cr, uid, ids, context=context)[0]
        # read() returns many2one values as (id, name) tuples; keep only ids.
        for field in datas['form'].keys():
            if isinstance(datas['form'][field], tuple):
                datas['form'][field] = datas['form'][field][0]
        taxcode_obj = self.pool.get('account.tax.code')
        taxcode_id = datas['form']['chart_tax_id']
        taxcode = taxcode_obj.browse(cr, uid, [taxcode_id], context=context)[0]
        # The report needs the company of the selected chart of tax.
        datas['form']['company_id'] = taxcode.company_id.id
        return self.pool['report'].get_action(cr, uid, [], 'account.report_vat', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
python-dirbtuves/Misago | misago/apps/usercp/avatar/views.py | 2 | 11249 | from path import path
from PIL import Image
from unidecode import unidecode
from zipfile import is_zipfile
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from misago import messages
from misago.apps.errors import error404
from misago.conf import settings
from misago.decorators import block_guest
from misago.messages import Message
from misago.shortcuts import render_to_response
from misago.utils.strings import random_string
from misago.utils.avatars import resizeimage
from misago.apps.usercp.template import RequestContext
from misago.apps.usercp.avatar.forms import UploadAvatarForm
def avatar_view(f):
    """Decorator for avatar-tab views.

    If the requesting user is banned from changing avatars, render the
    "avatar banned" page instead of running the wrapped view; otherwise
    the view is called unchanged.
    """
    # Local import keeps this block self-contained.
    from functools import wraps

    @wraps(f)  # preserve the view's __name__/__doc__ for debugging/urlconf
    def decorator(*args, **kwargs):
        request = args[0]
        if request.user.avatar_ban:
            return render_to_response('usercp/avatar_banned.html',
                                      context_instance=RequestContext(request, {
                                          'tab': 'avatar'}))
        return f(*args, **kwargs)
    return decorator
@block_guest
@avatar_view
def avatar(request):
    """Avatar tab landing page: show the current avatar and any flash message."""
    message = request.messages.get_message('usercp_avatar')
    return render_to_response('usercp/avatar.html',
                              context_instance=RequestContext(request, {
                                  'message': message,
                                  'tab': 'avatar'}));
@block_guest
@avatar_view
def gravatar(request):
    """Switch the user's avatar to Gravatar, if that avatar type is enabled."""
    if not 'gravatar' in settings.avatars_types:
        return error404(request)
    if request.user.avatar_type != 'gravatar':
        # CSRF-check the state change before dropping the old avatar.
        if request.csrf.request_secure(request):
            request.user.delete_avatar()
            request.user.avatar_type = 'gravatar'
            request.user.save(force_update=True)
            messages.success(request, _("Your avatar has been changed to Gravatar."), 'usercp_avatar')
        else:
            messages.error(request, _("Request authorisation is invalid."), 'usercp_avatar')
    return redirect(reverse('usercp_avatar'))
@block_guest
@avatar_view
def gallery(request):
    """Let the user pick an avatar from the static avatar galleries."""
    if not 'gallery' in settings.avatars_types:
        return error404(request)
    allowed_avatars = []
    galleries = []
    # Build gallery listings from static avatar directories, skipping the
    # special *_locked and *_default folders.
    for directory in path(settings.STATICFILES_DIRS[0]).joinpath('avatars').dirs():
        if directory[-7:] != '_locked' and directory[-8:] != '_default':
            # NOTE(review): [-7:] takes the last 7 characters of the path as
            # the gallery name — confirm gallery directory names are really
            # always 7 characters long.
            gallery = {'name': directory[-7:], 'avatars': []}
            avatars = directory.files('*.gif')
            avatars += directory.files('*.jpg')
            avatars += directory.files('*.jpeg')
            avatars += directory.files('*.png')
            for item in avatars:
                # Store avatars as 'gallery_dir/file.ext' relative paths.
                gallery['avatars'].append('/'.join(path(item).splitall()[-2:]))
            galleries.append(gallery)
            allowed_avatars += gallery['avatars']
    if not allowed_avatars:
        messages.info(request, _("No avatar galleries are available at the moment."), 'usercp_avatar')
        return redirect(reverse('usercp_avatar'))
    message = request.messages.get_message('usercp_avatar')
    if request.method == 'POST':
        if request.csrf.request_secure(request):
            new_avatar = request.POST.get('avatar_image')
            # Only accept avatars that were listed above (no path injection).
            if new_avatar in allowed_avatars:
                request.user.delete_avatar()
                request.user.avatar_type = 'gallery'
                request.user.avatar_image = new_avatar
                request.user.save(force_update=True)
                messages.success(request, _("Your avatar has been changed to one from gallery."), 'usercp_avatar')
                return redirect(reverse('usercp_avatar'))
            message = Message(_("Selected Avatar is incorrect."), messages.ERROR)
        else:
            message = Message(_("Request authorisation is invalid."), messages.ERROR)
    return render_to_response('usercp/avatar_gallery.html',
                              context_instance=RequestContext(request, {
                                  'message': message,
                                  'galleries': galleries,
                                  'tab': 'avatar'}));
@block_guest
@avatar_view
def upload(request):
    """Handle avatar image uploads.

    Validates the uploaded file, stores it as a temporary avatar, then
    either redirects to the crop view (JS clients) or immediately resizes
    the image to every configured avatar size.
    """
    if not 'upload' in settings.avatars_types:
        return error404(request)
    message = request.messages.get_message('usercp_avatar')
    if request.method == 'POST':
        form = UploadAvatarForm(request.POST, request.FILES, request=request)
        if form.is_valid():
            request.user.delete_avatar_temp()
            image = form.cleaned_data['avatar_upload']
            # Transliterate the client file name; only its extension is kept.
            image_name, image_extension = path(unidecode(image.name.lower())).splitext()
            image_name = '%s_tmp_%s%s' % (request.user.pk, random_string(8), image_extension)
            image_path = settings.MEDIA_ROOT + 'avatars/' + image_name
            request.user.avatar_temp = image_name
            with open(image_path, 'wb+') as destination:
                for chunk in image.chunks():
                    destination.write(chunk)
            request.user.save()
            try:
                if is_zipfile(image_path):
                    # Composite file upload
                    raise ValidationError()
                image = Image.open(image_path)
                if not image.format in ['GIF', 'PNG', 'JPEG']:
                    raise ValidationError()
                # Re-save through PIL so only plain image data survives.
                image.seek(0)
                image.save(image_path)
                if request.POST.get('js_check'):
                    return redirect(reverse('usercp_avatar_upload_crop'))
                # Redirect to crop page didnt happen, handle avatar with old school hollywood way
                image_path = settings.MEDIA_ROOT + 'avatars/'
                source = Image.open(image_path + request.user.avatar_temp)
                image_name, image_extension = path(request.user.avatar_temp).splitext()
                image_name = '%s_%s%s' % (request.user.pk, random_string(8), image_extension)
                # First configured size is the primary avatar; the rest get a size prefix.
                resizeimage(source, settings.AVATAR_SIZES[0], image_path + image_name, info=source.info, format=source.format)
                for size in settings.AVATAR_SIZES[1:]:
                    resizeimage(source, size, image_path + str(size) + '_' + image_name, info=source.info, format=source.format)
                # Update user model one more time
                request.user.delete_avatar_image()
                request.user.delete_avatar_original()
                request.user.avatar_type = 'upload'
                request.user.avatar_original = '%s_org_%s%s' % (request.user.pk, random_string(8), image_extension)
                source.save(image_path + request.user.avatar_original)
                request.user.delete_avatar_temp()
                request.user.avatar_image = image_name
                request.user.save(force_update=True)
                # Set message and adios!
                messages.success(request, _("Your avatar has changed."), 'usercp_avatar')
                return redirect(reverse('usercp_avatar'))
            except ValidationError:
                # Invalid upload: drop everything and fall back to the default avatar.
                request.user.delete_avatar()
                request.user.default_avatar()
                message = Message(_("Only gif, jpeg and png files are allowed for member avatars."), messages.ERROR)
        else:
            message = Message(form.non_field_errors()[0], messages.ERROR)
    else:
        form = UploadAvatarForm(request=request)
    return render_to_response('usercp/avatar_upload.html',
                              context_instance=RequestContext(request, {
                                  'message': message,
                                  'form': form,
                                  'tab': 'avatar'}));
@block_guest
@avatar_view
def crop(request, upload=False):
    """Crop the user's uploaded avatar.

    With ``upload=True`` this crops the freshly uploaded temporary image,
    otherwise it re-crops the stored original. The crop rectangle comes
    from POST fields crop_x/crop_y/crop_w, scaled by the preview width
    crop_b.
    """
    if upload and (not request.user.avatar_temp or not 'upload' in settings.avatars_types):
        return error404(request)
    if not upload and request.user.avatar_type != 'upload':
        messages.error(request, _("Crop Avatar option is avaiable only when you use uploaded image as your avatar."), 'usercp_avatar')
        return redirect(reverse('usercp_avatar'))
    message = request.messages.get_message('usercp_avatar')
    if request.method == 'POST':
        if request.csrf.request_secure(request):
            try:
                image_path = settings.MEDIA_ROOT + 'avatars/'
                if upload:
                    source = Image.open(image_path + request.user.avatar_temp)
                else:
                    source = Image.open(image_path + request.user.avatar_original)
                width, height = source.size
                # 'crop_b' is the on-screen preview width; scale the
                # client-side coordinates back up to the source resolution.
                aspect = float(width) / float(request.POST['crop_b'])
                crop_x = int(aspect * float(request.POST['crop_x']))
                crop_y = int(aspect * float(request.POST['crop_y']))
                crop_w = int(aspect * float(request.POST['crop_w']))
                # Square crop: the width is reused as the height.
                crop = source.crop((crop_x, crop_y, crop_x + crop_w, crop_y + crop_w))
                if upload:
                    image_name, image_extension = path(request.user.avatar_temp).splitext()
                else:
                    image_name, image_extension = path(request.user.avatar_original).splitext()
                image_name = '%s_%s%s' % (request.user.pk, random_string(8), image_extension)
                resizeimage(crop, settings.AVATAR_SIZES[0], image_path + image_name, info=source.info, format=source.format)
                for size in settings.AVATAR_SIZES[1:]:
                    resizeimage(crop, size, image_path + str(size) + '_' + image_name, info=source.info, format=source.format)
                request.user.delete_avatar_image()
                if upload:
                    # Promote the temporary upload to the permanent original.
                    request.user.delete_avatar_original()
                    request.user.avatar_type = 'upload'
                    request.user.avatar_original = '%s_org_%s%s' % (request.user.pk, random_string(8), image_extension)
                    source.save(image_path + request.user.avatar_original)
                    request.user.delete_avatar_temp()
                request.user.avatar_image = image_name
                request.user.avatar_crop = [str(float(request.POST[x])) for x in ('crop_x', 'crop_y', 'crop_w')]
                request.user.save(force_update=True)
                messages.success(request, _("Your avatar has been cropped."), 'usercp_avatar')
                return redirect(reverse('usercp_avatar'))
            except Exception:
                # Any failure (bad numbers, missing file, PIL error) is
                # reported as a generic form error.
                message = Message(_("Form contains errors."), messages.ERROR)
        else:
            message = Message(_("Request authorisation is invalid."), messages.ERROR)
    return render_to_response('usercp/avatar_crop.html',
                              context_instance=RequestContext(request, {
                                  'message': message,
                                  'after_upload': upload,
                                  'avatar_size': settings.AVATAR_SIZES[0],
                                  'avatar_crop': request.user.avatar_crop if not upload else None,
                                  'source': 'avatars/%s' % (request.user.avatar_temp if upload else request.user.avatar_original),
                                  'tab': 'avatar'}));
| gpl-3.0 |
Umang88/Radon-Kenzo | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events that have no dedicated handler.
unhandled = autodict()
def trace_begin():
    """Called once by perf before any events are processed."""
    print "trace_begin"
    pass
def trace_end():
    """Called once by perf after the last event; reports unhandled events."""
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       vec):
    """Handler for irq:softirq_entry: prints the decoded vector symbol."""
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    """Handler for kmem:kmalloc: prints allocation details and GFP flags."""
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
         flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    """Count each event type that has no dedicated handler.

    `unhandled` is an autodict, so an unknown key yields a fresh sub-dict;
    the first `+= 1` then raises TypeError and seeds the counter with 1.
    """
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    """Print the fixed-width common-field prefix shared by all handlers."""
    # Trailing comma suppresses the newline (Python 2 print statement).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    """Print common trace fields that are only reachable via the context."""
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
           common_lock_depth(context))
def print_unhandled():
    """Print a table of every event type that had no dedicated handler."""
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
internetarchive/warctools | hanzo/warcextract.py | 1 | 1774 | #!/usr/bin/env python
"""warcextract - dump warc record context to standard out"""
from __future__ import print_function
import os
import sys
import sys
import os.path
from optparse import OptionParser
from contextlib import closing
from .warctools import WarcRecord
# Command-line definition: -I selects the input format, -L the log level;
# the limit option is currently disabled.
parser = OptionParser(usage="%prog [options] warc offset")
#parser.add_option("-l", "--limit", dest="limit")
parser.add_option("-I", "--input", dest="input_format")
parser.add_option("-L", "--log-level", dest="log_level")
parser.set_defaults(output_directory=None, limit=None, log_level="info")
def main(argv):
    """Dump one WARC record's payload to stdout.

    With no positional arguments the first record on stdin is dumped;
    otherwise args are a filename and an optional byte offset into it.
    Returns 0 on completion.
    """
    (options, args) = parser.parse_args(args=argv[1:])

    try:  # python3
        out = sys.stdout.buffer
    except AttributeError:  # python2
        out = sys.stdout

    if len(args) < 1:
        # dump the first record on stdin
        with closing(WarcRecord.open_archive(file_handle=sys.stdin, gzip=None)) as fh:
            dump_record(fh, out)
    else:
        # dump a record from the filename, with optional offset
        filename = args[0]
        if len(args) > 1:
            offset = int(args[1])
        else:
            offset = 0

        with closing(WarcRecord.open_archive(filename=filename, gzip="auto")) as fh:
            fh.seek(offset)
            dump_record(fh, out)

    return 0
def dump_record(fh, out):
    """Write the payload of the first record read from *fh* to *out*.

    fh  -- an open warctools archive reader exposing read_records()
    out -- a binary file-like object

    Parse errors are reported on stderr instead of dumping a payload.
    Only the first record is consumed.
    """
    for (offset, record, errors) in fh.read_records(limit=1, offsets=False):
        if record:
            # content is a (content_type, payload_bytes) pair.
            out.write(record.content[1])
        elif errors:
            # Fix: this used to interpolate an undefined name `name`, which
            # raised NameError whenever the error branch ran. `offset` may be
            # None because offsets=False was requested above.
            print("warc errors at offset %d" % (offset if offset else 0),
                  file=sys.stderr)
            for e in errors:
                print('\t', e)
        break  # only use one (I'm terrible)
def run():
    """Console entry point: run main() and exit with its return code."""
    sys.exit(main(sys.argv))
if __name__ == '__main__':
run()
| mit |
mosajjal/mitmproxy | test/mitmproxy/test_proxy.py | 2 | 4742 | import os
import argparse
from unittest import mock
from OpenSSL import SSL
import pytest
from mitmproxy.tools import cmdline
from mitmproxy import options
from mitmproxy.proxy import ProxyConfig
from mitmproxy.proxy.server import DummyServer, ProxyServer, ConnectionHandler
from mitmproxy.proxy import config
from mitmproxy.test import tutils
from ..conftest import skip_windows
class MockParser(argparse.ArgumentParser):
    """An ArgumentParser that raises instead of exiting.

    The stock parser calls sys.exit() on invalid input, which would kill
    the test process; raising a plain Exception lets tests assert on the
    error message instead.
    """

    def error(self, message):
        # Surface the parse failure to the caller rather than exiting.
        raise Exception(message)
class TestProcessProxyOptions:
    """Tests that command-line flags parse into a valid ProxyConfig."""

    def p(self, *args):
        # Parse *args* as mitmproxy command-line flags and build a config.
        parser = MockParser()
        cmdline.common_options(parser)
        args = parser.parse_args(args=args)
        opts = options.Options()
        opts.merge(cmdline.get_common_options(args))
        pconf = config.ProxyConfig(opts)
        return parser, pconf

    def assert_noerr(self, *args):
        # Assert the flags parse cleanly and return the resulting config.
        m, p = self.p(*args)
        assert p
        return p

    def test_simple(self):
        assert self.p()

    def test_cadir(self):
        with tutils.tmpdir() as cadir:
            self.assert_noerr("--cadir", cadir)

    @mock.patch("mitmproxy.platform.original_addr", None)
    def test_no_transparent(self):
        # Without a platform address resolver, transparent mode is rejected.
        with pytest.raises(Exception, match="Transparent mode not supported"):
            self.p("-T")

    @mock.patch("mitmproxy.platform.original_addr")
    def test_modes(self, _):
        # Reverse, transparent and upstream modes, plus their error cases.
        self.assert_noerr("-R", "http://localhost")
        with pytest.raises(Exception, match="expected one argument"):
            self.p("-R")
        with pytest.raises(Exception, match="Invalid server specification"):
            self.p("-R", "reverse")
        self.assert_noerr("-T")
        self.assert_noerr("-U", "http://localhost")
        with pytest.raises(Exception, match="Invalid server specification"):
            self.p("-U", "upstream")
        self.assert_noerr("--upstream-auth", "test:test")
        with pytest.raises(Exception, match="expected one argument"):
            self.p("--upstream-auth")
        with pytest.raises(Exception, match="mutually exclusive"):
            self.p("-R", "http://localhost", "-T")

    def test_client_certs(self):
        # Accepts a directory or a single PEM file; rejects missing paths.
        with tutils.tmpdir() as cadir:
            self.assert_noerr("--client-certs", cadir)
        self.assert_noerr(
            "--client-certs",
            os.path.join(tutils.test_data.path("mitmproxy/data/clientcert"), "client.pem"))
        with pytest.raises(Exception, match="path does not exist"):
            self.p("--client-certs", "nonexistent")

    def test_certs(self):
        self.assert_noerr(
            "--cert",
            tutils.test_data.path("mitmproxy/data/testkey.pem"))
        with pytest.raises(Exception, match="does not exist"):
            self.p("--cert", "nonexistent")

    def test_insecure(self):
        # --insecure disables upstream certificate verification.
        p = self.assert_noerr("--insecure")
        assert p.openssl_verification_mode_server == SSL.VERIFY_NONE

    def test_upstream_trusted_cadir(self):
        expected_dir = "/path/to/a/ca/dir"
        p = self.assert_noerr("--upstream-trusted-cadir", expected_dir)
        assert p.options.ssl_verify_upstream_trusted_cadir == expected_dir

    def test_upstream_trusted_ca(self):
        expected_file = "/path/to/a/cert/file"
        p = self.assert_noerr("--upstream-trusted-ca", expected_file)
        assert p.options.ssl_verify_upstream_trusted_ca == expected_file
class TestProxyServer:
    """Tests that ProxyServer surfaces socket binding errors at startup."""

    @skip_windows
    def test_err(self):
        # binding to 0.0.0.0:1 works without special permissions on Windows
        conf = ProxyConfig(options.Options(listen_port=1))
        with pytest.raises(Exception, match="Error starting proxy server"):
            ProxyServer(conf)

    def test_err_2(self):
        # An unresolvable listen host must also fail at startup.
        conf = ProxyConfig(options.Options(listen_host="256.256.256.256"))
        with pytest.raises(Exception, match="Error starting proxy server"):
            ProxyServer(conf)
class TestDummyServer:
    """DummyServer must accept the full server interface as no-ops."""

    def test_simple(self):
        d = DummyServer(None)
        d.set_channel(None)
        d.shutdown()
class TestConnectionHandler:
    """Tests ConnectionHandler's top-level crash reporting."""

    def test_fatal_error(self, capsys):
        config = mock.Mock()
        root_layer = mock.Mock()
        # Make the root layer raise to simulate a crash inside the core.
        root_layer.side_effect = RuntimeError
        config.options.mode.return_value = root_layer
        channel = mock.Mock()

        def ask(_, x):
            return x

        channel.ask = ask
        c = ConnectionHandler(
            mock.MagicMock(),
            ("127.0.0.1", 8080),
            config,
            channel
        )
        c.handle()
        # The crash must be reported on stderr, not propagate.
        _, err = capsys.readouterr()
        assert "mitmproxy has crashed" in err
| mit |
nabc2017/www | beta/phpmyadmin/doc/_ext/configext.py | 141 | 6618 | from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.domains.std import GenericObject, StandardDomain
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import clean_astext, make_refnode
from sphinx.util import ws_re
from sphinx import addnodes
from sphinx.util.docfields import Field
from docutils import nodes
def get_id_from_cfg(text):
    """Split a config option like ``$cfg['Servers'][$i]['host']`` into its
    bracketed name parts (e.g. ``['Servers', 'host']``), for use as an
    anchor ID.
    """
    prefix = "$cfg['"
    suffix = "']"
    if text.startswith(prefix):
        text = text[len(prefix):]
    if text.endswith(suffix):
        text = text[:-len(suffix)]
    # The per-server index placeholder carries no naming information.
    cleaned = text.replace('[$i]', '')
    return cleaned.split("']['")
class ConfigOption(ObjectDescription):
    """Directive describing a single ``$cfg[...]`` configuration option."""

    indextemplate = 'configuration option; %s'
    parse_node = None
    has_arguments = True

    doc_field_types = [
        Field('default', label='Default value', has_arg=False,
              names=('default', )),
        Field('type', label='Type', has_arg=False,
              names=('type',)),
    ]

    def handle_signature(self, sig, signode):
        """Render the option signature and return its normalized name."""
        signode.clear()
        signode += addnodes.desc_name(sig, sig)
        # normalize whitespace like XRefRole does
        name = ws_re.sub('', sig)
        return name

    def add_target_and_index(self, name, sig, signode):
        """Register the ``cfg_*`` anchor and the index entries for *name*."""
        targetparts = get_id_from_cfg(name)
        targetname = 'cfg_%s' % '_'.join(targetparts)
        signode['ids'].append(targetname)
        self.state.document.note_explicit_target(signode)
        indextype = 'single'
        # Generic index entries
        indexentry = self.indextemplate % (name,)
        self.indexnode['entries'].append((indextype, indexentry,
                                          targetname, targetname))
        self.indexnode['entries'].append((indextype, name,
                                          targetname, targetname))
        # Server section: options under $cfg['Servers'] additionally get
        # 'server configuration; ...' index entries.
        if targetparts[0] == 'Servers' and len(targetparts) > 1:
            indexname = ', '.join(targetparts[1:])
            self.indexnode['entries'].append((indextype, 'server configuration; %s' % indexname,
                                              targetname, targetname))
            self.indexnode['entries'].append((indextype, indexname,
                                              targetname, targetname))
        else:
            indexname = ', '.join(targetparts)
            self.indexnode['entries'].append((indextype, indexname,
                                              targetname, targetname))
        self.env.domaindata['config']['objects'][self.objtype, name] = \
            self.env.docname, targetname
class ConfigSectionXRefRole(XRefRole):
    """
    Cross-referencing role for configuration sections (adds an index entry).
    """
    def result_nodes(self, document, env, node, is_ref):
        """Prefix a resolvable reference with index and target nodes."""
        if not is_ref:
            return [node], []
        varname = node['reftarget']
        tgtid = 'index-%s' % env.new_serialno('index')
        indexnode = addnodes.index()
        indexnode['entries'] = [
            ('single', varname, tgtid, varname),
            ('single', 'configuration section; %s' % varname, tgtid, varname)
        ]
        targetnode = nodes.target('', '', ids=[tgtid])
        document.note_explicit_target(targetnode)
        return [indexnode, targetnode, node], []
class ConfigSection(ObjectDescription):
    """Directive describing a named configuration section."""

    indextemplate = 'configuration section; %s'
    parse_node = None

    def handle_signature(self, sig, signode):
        """Render the section signature and return its normalized name."""
        if self.parse_node:
            name = self.parse_node(self.env, sig, signode)
        else:
            signode.clear()
            signode += addnodes.desc_name(sig, sig)
            # normalize whitespace like XRefRole does
            name = ws_re.sub('', sig)
        return name

    def add_target_and_index(self, name, sig, signode):
        """Register the section anchor and its index entry."""
        targetname = '%s-%s' % (self.objtype, name)
        signode['ids'].append(targetname)
        self.state.document.note_explicit_target(signode)
        if self.indextemplate:
            colon = self.indextemplate.find(':')
            if colon != -1:
                # 'type: entry %s' templates carry an explicit index type.
                indextype = self.indextemplate[:colon].strip()
                indexentry = self.indextemplate[colon+1:].strip() % (name,)
            else:
                indextype = 'single'
                indexentry = self.indextemplate % (name,)
            self.indexnode['entries'].append((indextype, indexentry,
                                              targetname, targetname))
        self.env.domaindata['config']['objects'][self.objtype, name] = \
            self.env.docname, targetname
class ConfigOptionXRefRole(XRefRole):
    """
    Cross-referencing role for configuration options (adds an index entry).
    """
    def result_nodes(self, document, env, node, is_ref):
        """Prefix a resolvable reference with index and target nodes."""
        if not is_ref:
            return [node], []
        varname = node['reftarget']
        tgtid = 'index-%s' % env.new_serialno('index')
        indexnode = addnodes.index()
        indexnode['entries'] = [
            ('single', varname, tgtid, varname),
            ('single', 'configuration option; %s' % varname, tgtid, varname)
        ]
        targetnode = nodes.target('', '', ids=[tgtid])
        document.note_explicit_target(targetnode)
        return [indexnode, targetnode, node], []
class ConfigFileDomain(Domain):
    """Sphinx domain ('config') holding configuration options and sections."""

    name = 'config'
    label = 'Config'

    object_types = {
        'option': ObjType('config option', 'option'),
        'section': ObjType('config section', 'section'),
    }
    directives = {
        'option': ConfigOption,
        'section': ConfigSection,
    }
    roles = {
        'option': ConfigOptionXRefRole(),
        'section': ConfigSectionXRefRole(),
    }
    initial_data = {
        'objects': {},  # (type, name) -> docname, labelid
    }

    def clear_doc(self, docname):
        """Forget every object recorded for *docname* (before a re-read)."""
        # Fix: iterate over a snapshot — deleting entries while iterating
        # the live items() view raises RuntimeError on Python 3.
        for key, (fn, _) in list(self.data['objects'].items()):
            if fn == docname:
                del self.data['objects'][key]

    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        """Resolve a config option/section reference, or return None."""
        docname, labelid = self.data['objects'].get((typ, target), ('', ''))
        if not docname:
            return None
        else:
            return make_refnode(builder, fromdocname, docname,
                                labelid, contnode)

    def get_objects(self):
        """Yield (name, dispname, type, docname, anchor, priority) tuples."""
        for (type, name), info in self.data['objects'].items():
            yield (name, name, type, info[0], info[1],
                   self.object_types[type].attrs['searchprio'])
def setup(app):
    """Sphinx extension entry point: register the config domain."""
    app.add_domain(ConfigFileDomain)
| gpl-2.0 |
genos/online_problems | euler/previous/euler70.py | 1 | 1194 | #!/usr/bin/env python
# euler70.py
from __future__ import division
import subprocess
def prime_list_faster(m, n):
    """
    Returns a list of primes in [m, n].
    Requires the outside program primes from http://cr.yp.to/primegen.html, so
    this is really just an exercise in using the subprocess module.
    """
    # The external 'primes' binary prints one prime per whitespace token.
    primes_output = subprocess.Popen(['primes', '%s'%m, '%s'%n], shell=False,
                                     stdout=subprocess.PIPE).communicate()[0]
    return [int(p) for p in primes_output.split()]
def totient(p, q):
    """Euler's totient of p*q, assuming p and q are prime (or 1)."""
    # A factor of 1 contributes nothing: phi collapses to the other
    # factor minus one.
    if q == 1:
        phi = p - 1
    elif p == 1:
        phi = q - 1
    else:
        phi = (p - 1) * (q - 1)
    return phi
def is_perm(a, b):
    """True if the decimal digits of a and b are permutations of each other."""
    digits_a = sorted(str(a))
    digits_b = sorted(str(b))
    return digits_a == digits_b
def main():
    """Project Euler 70: find n = p*q < 1e7 minimizing n/phi(n) where
    phi(n) is a digit permutation of n."""
    # Primes near sqrt(1e7) ≈ 3163 give products just under the limit
    # with the largest totients, hence the 1e3..5e4 window.
    p_list = prime_list_faster(1e3, 5e4)
    min_n, min_phi, min_ratio = 6, 2, 3
    for p in p_list:
        for q in p_list:
            n = p*q
            if n < 1e7:
                phi = totient(p, q)
                ratio = n/phi
                if (is_perm(n, phi) and ratio < min_ratio):
                    min_n, min_phi, min_ratio = n, phi, ratio
    print(min_n, min_phi, min_ratio)
    return
if __name__ == '__main__':
main()
| mit |
sysalexis/kbengine | kbe/res/scripts/common/Lib/sqlite3/test/hooks.py | 86 | 9414 | #-*- coding: iso-8859-1 -*-
# pysqlite2/test/hooks.py: tests for various SQLite-specific hooks
#
# Copyright (C) 2006-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import sqlite3 as sqlite
class CollationTests(unittest.TestCase):
    """Tests for Connection.create_collation()."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def CheckCreateCollationNotCallable(self):
        """A non-callable collation argument must raise TypeError."""
        con = sqlite.connect(":memory:")
        try:
            con.create_collation("X", 42)
            self.fail("should have raised a TypeError")
        except TypeError as e:
            self.assertEqual(e.args[0], "parameter must be callable")

    def CheckCreateCollationNotAscii(self):
        """Non-ASCII collation names are rejected with ProgrammingError."""
        con = sqlite.connect(":memory:")
        try:
            con.create_collation("collä", lambda x, y: (x > y) - (x < y))
            self.fail("should have raised a ProgrammingError")
        except sqlite.ProgrammingError as e:
            pass

    @unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 1),
                     'old SQLite versions crash on this test')
    def CheckCollationIsUsed(self):
        """A registered collation orders results; deregistering breaks it."""
        def mycoll(x, y):
            # reverse order
            return -((x > y) - (x < y))

        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", mycoll)
        sql = """
            select x from (
            select 'a' as x
            union
            select 'b' as x
            union
            select 'c' as x
            ) order by x collate mycoll
            """
        result = con.execute(sql).fetchall()
        if result[0][0] != "c" or result[1][0] != "b" or result[2][0] != "a":
            self.fail("the expected order was not returned")

        con.create_collation("mycoll", None)
        try:
            result = con.execute(sql).fetchall()
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            self.assertEqual(e.args[0].lower(), "no such collation sequence: mycoll")

    def CheckCollationReturnsLargeInteger(self):
        """Collation results outside the C int range must still sort correctly."""
        def mycoll(x, y):
            # reverse order
            return -((x > y) - (x < y)) * 2**32
        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", mycoll)
        sql = """
            select x from (
            select 'a' as x
            union
            select 'b' as x
            union
            select 'c' as x
            ) order by x collate mycoll
            """
        result = con.execute(sql).fetchall()
        self.assertEqual(result, [('c',), ('b',), ('a',)],
                         msg="the expected order was not returned")

    def CheckCollationRegisterTwice(self):
        """
        Register two different collation functions under the same name.
        Verify that the last one is actually used.
        """
        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", lambda x, y: (x > y) - (x < y))
        con.create_collation("mycoll", lambda x, y: -((x > y) - (x < y)))
        result = con.execute("""
            select x from (select 'a' as x union select 'b' as x) order by x collate mycoll
            """).fetchall()
        if result[0][0] != 'b' or result[1][0] != 'a':
            self.fail("wrong collation function is used")

    def CheckDeregisterCollation(self):
        """
        Register a collation, then deregister it. Make sure an error is raised if we try
        to use it.
        """
        con = sqlite.connect(":memory:")
        con.create_collation("mycoll", lambda x, y: (x > y) - (x < y))
        con.create_collation("mycoll", None)
        try:
            con.execute("select 'a' as x union select 'b' as x order by x collate mycoll")
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError as e:
            if not e.args[0].startswith("no such collation sequence"):
                self.fail("wrong OperationalError raised")
class ProgressTests(unittest.TestCase):
    """Tests for Connection.set_progress_handler()."""

    def CheckProgressHandlerUsed(self):
        """
        Test that the progress handler is invoked once it is set.
        """
        con = sqlite.connect(":memory:")
        progress_calls = []
        def progress():
            progress_calls.append(None)
            return 0
        con.set_progress_handler(progress, 1)
        con.execute("""
            create table foo(a, b)
            """)
        self.assertTrue(progress_calls)

    def CheckOpcodeCount(self):
        """
        Test that the opcode argument is respected.
        """
        con = sqlite.connect(":memory:")
        progress_calls = []
        def progress():
            progress_calls.append(None)
            return 0
        con.set_progress_handler(progress, 1)
        curs = con.cursor()
        curs.execute("""
            create table foo (a, b)
            """)
        first_count = len(progress_calls)
        progress_calls = []
        con.set_progress_handler(progress, 2)
        curs.execute("""
            create table bar (a, b)
            """)
        second_count = len(progress_calls)
        # A larger opcode interval must fire the handler no more often.
        self.assertGreaterEqual(first_count, second_count)

    def CheckCancelOperation(self):
        """
        Test that returning a non-zero value stops the operation in progress.
        """
        con = sqlite.connect(":memory:")
        progress_calls = []
        def progress():
            progress_calls.append(None)
            return 1
        con.set_progress_handler(progress, 1)
        curs = con.cursor()
        self.assertRaises(
            sqlite.OperationalError,
            curs.execute,
            "create table bar (a, b)")

    def CheckClearHandler(self):
        """
        Test that setting the progress handler to None clears the previously set handler.
        """
        con = sqlite.connect(":memory:")
        action = 0
        def progress():
            nonlocal action
            action = 1
            return 0
        con.set_progress_handler(progress, 1)
        con.set_progress_handler(None, 1)
        con.execute("select 1 union select 2 union select 3").fetchall()
        self.assertEqual(action, 0, "progress handler was not cleared")
class TraceCallbackTests(unittest.TestCase):
    """Tests for Connection.set_trace_callback()."""

    def CheckTraceCallbackUsed(self):
        """
        Test that the trace callback is invoked once it is set.
        """
        con = sqlite.connect(":memory:")
        traced_statements = []
        def trace(statement):
            traced_statements.append(statement)
        con.set_trace_callback(trace)
        con.execute("create table foo(a, b)")
        self.assertTrue(traced_statements)
        self.assertTrue(any("create table foo" in stmt for stmt in traced_statements))

    def CheckClearTraceCallback(self):
        """
        Test that setting the trace callback to None clears the previously set callback.
        """
        con = sqlite.connect(":memory:")
        traced_statements = []
        def trace(statement):
            traced_statements.append(statement)
        con.set_trace_callback(trace)
        con.set_trace_callback(None)
        con.execute("create table foo(a, b)")
        self.assertFalse(traced_statements, "trace callback was not cleared")

    def CheckUnicodeContent(self):
        """
        Test that the statement can contain unicode literals.
        """
        unicode_value = '\xf6\xe4\xfc\xd6\xc4\xdc\xdf\u20ac'
        con = sqlite.connect(":memory:")
        traced_statements = []
        def trace(statement):
            traced_statements.append(statement)
        con.set_trace_callback(trace)
        con.execute("create table foo(x)")
        # Can't execute bound parameters as their values don't appear
        # in traced statements before SQLite 3.6.21
        # (cf. http://www.sqlite.org/draft/releaselog/3_6_21.html)
        con.execute('insert into foo(x) values ("%s")' % unicode_value)
        con.commit()
        self.assertTrue(any(unicode_value in stmt for stmt in traced_statements),
                        "Unicode data %s garbled in trace callback: %s"
                        % (ascii(unicode_value), ', '.join(map(ascii, traced_statements))))
def suite():
    """Assemble all 'Check*' tests from the three hook test cases."""
    cases = (CollationTests, ProgressTests, TraceCallbackTests)
    return unittest.TestSuite(
        unittest.makeSuite(case, "Check") for case in cases)
def test():
    """Run the hook test suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())
if __name__ == "__main__":
test()
| lgpl-3.0 |
bjodah/batemaneq | scripts/coverage_badge.py | 10 | 1892 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script generates a "coverage" badge as a svg file from
the html report from coverage.py
Usage:
$ ./coverage_badge.py htmlcov/ coverage.svg
"""
from __future__ import (absolute_import, division, print_function)
import os
# this template was generated from shields.io on 2015-10-11
template = """
<svg xmlns="http://www.w3.org/2000/svg" width="92" height="20">
<linearGradient id="b" x2="0" y2="100%">
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
<stop offset="1" stop-opacity=".1"/>
</linearGradient>
<mask id="a">
<rect width="92" height="20" rx="3" fill="#fff"/>
</mask>
<g mask="url(#a)">
<path fill="#555" d="M0 0h63v20H0z"/>
<path fill="{0:s}" d="M63 0h29v20H63z"/>
<path fill="url(#b)" d="M0 0h92v20H0z"/>
</g>
<g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,
sans-serif" font-size="11">
<text x="31.5" y="15" fill="#010101" fill-opacity=".3">coverage</text>
<text x="31.5" y="14">coverage</text>
<text x="76.5" y="15" fill="#010101" fill-opacity=".3">{1:s}%</text>
<text x="76.5" y="14">{1:s}%</text>
</g>
</svg>
"""
def get_coverage(htmldir):
    """Return the total coverage percentage from coverage.py's HTML report.

    Scans ``htmldir/index.html`` for the first line containing ``pc_cov``
    and extracts the integer percentage from it.  Raises ValueError when
    no such line exists.
    """
    index_path = os.path.join(htmldir, 'index.html')
    # Fix: use a context manager so the file handle is closed (the
    # original iterated a bare open() and leaked it).
    with open(index_path, 'rt') as index_file:
        for line in index_file:
            if 'pc_cov' in line:
                # e.g. '<span class="pc_cov">83%</span>' -> 83
                return int(line.split('pc_cov')[1].split(
                    '>')[1].split('<')[0].rstrip('%'))
    raise ValueError("Could not find pc_cov in index.html")
def write_cov_badge_svg(path, percent):
    """Render the module-level badge template to *path*, colored by the
    integer coverage *percent* (red below 50%, bright green at 100%)."""
    palette = ['#e05d44', '#fe7d37', '#dfb317', '#a4a61d', '#97CA00', '#4c1']
    thresholds = (50, 60, 70, 80, 90, 100)
    # Pick the color of the first bucket the percentage falls into.
    color = next(clr for lim, clr in zip(thresholds, palette) if percent <= lim)
    with open(path, 'wt') as badge:
        badge.write(template.format(color, str(percent)))
if __name__ == '__main__':
import sys
assert len(sys.argv) == 3
cov_percent = get_coverage(sys.argv[1])
write_cov_badge_svg(sys.argv[2], cov_percent)
| bsd-2-clause |
sullivanmatt/splunk-sdk-python | examples/search.py | 1 | 3867 | #!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A command line utility for executing Splunk searches."""
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from time import sleep
from splunklib.binding import HTTPError
import splunklib.client as client
try:
from utils import *
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
FLAGS_TOOL = [ "verbose" ]
FLAGS_CREATE = [
"earliest_time", "latest_time", "now", "time_format",
"exec_mode", "search_mode", "rt_blocking", "rt_queue_size",
"rt_maxblocksecs", "rt_indexfilter", "id", "status_buckets",
"max_count", "max_time", "timeout", "auto_finalize_ec", "enable_lookups",
"reload_macros", "reduce_freq", "spawn_process", "required_field_list",
"rf", "auto_cancel", "auto_pause",
]
FLAGS_RESULTS = [
"offset", "count", "search", "field_list", "f", "output_mode"
]
def cmdline(argv, flags, **kwargs):
    """A cmdopts wrapper that takes a list of flags and builds the
    corresponding cmdopts rules to match those flags."""
    # Each flag name becomes a '--flag' rule; defaults come from ~/.splunkrc.
    rules = dict([(flag, {'flags': ["--%s" % flag]}) for flag in flags])
    return parse(argv, rules, ".splunkrc", **kwargs)
def main(argv):
    """Validate the query, run the search job, and stream results to stdout.

    argv -- command-line arguments (without the program name); exactly one
            positional argument, the search expression, is required.
    """
    usage = 'usage: %prog [options] "search"'

    flags = []
    flags.extend(FLAGS_TOOL)
    flags.extend(FLAGS_CREATE)
    flags.extend(FLAGS_RESULTS)
    opts = cmdline(argv, flags, usage=usage)

    if len(opts.args) != 1:
        error("Search expression required", 2)
    search = opts.args[0]

    verbose = opts.kwargs.get("verbose", 0)

    kwargs_splunk = dslice(opts.kwargs, FLAGS_SPLUNK)
    kwargs_create = dslice(opts.kwargs, FLAGS_CREATE)
    kwargs_results = dslice(opts.kwargs, FLAGS_RESULTS)

    service = client.connect(**kwargs_splunk)

    try:
        # Ask the server to validate the query before creating a job.
        service.parse(search, parse_only=True)
    except HTTPError as e:
        # Fix: this previously called cmdopts.error(), but no 'cmdopts'
        # name is bound in this module; use the error() helper instead.
        error("query '%s' is invalid:\n\t%s" % (search, e.message), 2)
        return

    job = service.jobs.create(search, **kwargs_create)
    # Poll the job until it finishes, optionally printing progress.
    while True:
        while not job.is_ready():
            pass
        stats = {'isDone': job['isDone'],
                 'doneProgress': job['doneProgress'],
                 'scanCount': job['scanCount'],
                 'eventCount': job['eventCount'],
                 'resultCount': job['resultCount']}
        progress = float(stats['doneProgress'])*100
        scanned = int(stats['scanCount'])
        matched = int(stats['eventCount'])
        results = int(stats['resultCount'])
        if verbose > 0:
            status = ("\r%03.1f%% | %d scanned | %d matched | %d results" % (
                progress, scanned, matched, results))
            sys.stdout.write(status)
            sys.stdout.flush()
        if stats['isDone'] == '1':
            if verbose > 0: sys.stdout.write('\n')
            break
        sleep(2)

    # count=0 means "no limit". Fix: dict.has_key() was removed in Python 3;
    # the 'in' operator works on Python 2 as well.
    if 'count' not in kwargs_results:
        kwargs_results['count'] = 0
    # Stream the result payload to stdout in chunks.
    results = job.results(**kwargs_results)
    while True:
        content = results.read(1024)
        if len(content) == 0: break
        sys.stdout.write(content)
        sys.stdout.flush()
    sys.stdout.write('\n')

    job.cancel()
| apache-2.0 |
unkyulee/elastic-cms | src/web/modules/task/controllers/task/edit.py | 1 | 1574 | from flask import render_template, request
import lib.es as es
import web.util.tools as tools
def get(p):
    """Render the task edit page; on POST, persist the edited task first.

    p['nav'][-1] is expected to hold the id of the task being edited.
    Returns an alert page if the task does not exist, a redirect back to
    the referrer after a successful save, or the rendered edit template.
    """
    # load task
    task_id = p['nav'][-1]
    p['task'] = es.get(p['host'], 'core_task', 'task', task_id)
    if not p['task']:
        return tools.alert('task not found - {}'.format(task_id))

    if request.method == "POST":
        es.update(p['host'], 'core_task', 'task', task_id, {
            'navigation_id': p['navigation']['id'],
            'name': tools.get('name'),
            # `or` keeps the same falsy-fallback semantics while reading
            # the form value only once (the original called tools.get twice)
            'runat': tools.get('runat') or 'anywhere',
            'description': tools.get('description')
        })
        es.flush(p['host'], 'core_task')
        return tools.redirect(request.referrer)

    # load action list
    # when there are no records then the task fails to run
    option = ''
    if es.count(p['host'], 'core_task', 'action'):
        option = 'size=10000&sort=order_key:asc'
    query = 'task_id:{}'.format(p['task']['id'])
    p['action_list'] = es.list(p['host'], 'core_task', 'action',
                               query, option)
    for action in p['action_list']:
        action['module'] = es.get(p['host'], 'core_task', 'module', action['module_id'])

    # load schedule list
    query = 'task_id:{}'.format(p['task']['id'])
    p['schedule_list'] = es.list(p['host'], 'core_task', 'schedule', query)

    # load task module List
    option = 'size=10000&sort=description:asc'
    p['task_module_list'] = es.list(p['host'], 'core_task', 'module', '*', option)

    return render_template("task/task/edit.html", p=p)
| mit |
AreaROM/android_external_skia | platform_tools/android/bin/gyp_to_android.py | 66 | 8846 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script for generating the Android framework's version of Skia from gyp
files.
"""
import os
import shutil
import sys
import tempfile
# Find the top of trunk
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SKIA_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir,
os.pardir))
# Find the directory with our helper files, and add it to the path.
ANDROID_TOOLS = os.path.join(SKIA_DIR, 'platform_tools', 'android')
sys.path.append(ANDROID_TOOLS)
import gyp_gen.android_framework_gyp as android_framework_gyp
import gyp_gen.gypd_parser as gypd_parser
import gyp_gen.generate_user_config as generate_user_config
import gyp_gen.makefile_writer as makefile_writer
import gyp_gen.tool_makefile_writer as tool_makefile_writer
import gyp_gen.vars_dict_lib as vars_dict_lib
# Folder containing all gyp files and generated gypd files.
GYP_FOLDER = 'gyp'
def generate_var_dict(target_dir, target_file, skia_arch_type, have_neon):
"""Create a VarsDict for a particular arch type.
Each paramater is passed directly to android_framework_gyp.main().
Args:
target_dir: Directory containing gyp files.
target_file: Target gyp file.
skia_arch_type: Target architecture.
have_neon: Whether the target should build for neon.
Returns:
A VarsDict containing the variable definitions determined by gyp.
"""
result_file = android_framework_gyp.main(target_dir, target_file,
skia_arch_type, have_neon)
var_dict = vars_dict_lib.VarsDict()
gypd_parser.parse_gypd(var_dict, result_file, '.')
android_framework_gyp.clean_gypd_files(target_dir)
print '.',
return var_dict
def main(target_dir=None, require_sk_user_config=False):
  """Create Android.mk for the Android framework's external/skia.

  Builds Android.mk using Skia's gyp files.

  Args:
    target_dir: Directory in which to place 'Android.mk'. If None, the file
        will be placed in skia's root directory.
    require_sk_user_config: If True, raise an AssertionError if
        SkUserConfig.h does not exist.
  """
  # Create a temporary folder to hold gyp and gypd files. Create it in SKIA_DIR
  # so that it is a sibling of gyp/, so the relationships between gyp files and
  # other files (e.g. platform_tools/android/gyp/dependencies.gypi, referenced
  # by android_deps.gyp as a relative path) is unchanged.
  # Use mkdtemp to find an unused folder name, but then delete it so copytree
  # can be called with a non-existent directory.
  tmp_folder = tempfile.mkdtemp(dir=SKIA_DIR)
  os.rmdir(tmp_folder)
  shutil.copytree(os.path.join(SKIA_DIR, GYP_FOLDER), tmp_folder)

  try:
    main_gyp_file = 'android_framework_lib.gyp'

    # Progress dots are appended by each generate_var_dict() call below.
    print 'Creating Android.mk',

    # Generate a separate VarsDict for each architecture type.  For each
    # archtype:
    # 1. call android_framework_gyp.main() to generate gypd files
    # 2. call parse_gypd to read those gypd files into the VarsDict
    # 3. delete the gypd files
    #
    # Once we have the VarsDict for each architecture type, we combine them all
    # into a single Android.mk file, which can build targets of any
    # architecture type.

    # The default uses a non-existant archtype, to find all the general
    # variable definitions.
    default_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'other',
                                         False)
    arm_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm', False)
    arm_neon_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm',
                                          True)
    x86_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86', False)

    mips_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips', False)

    mips64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips64',
                                        False)

    arm64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm64',
                                       False)

    # Compute the intersection of all targets. All the files in the intersection
    # should be part of the makefile always. Each dict will now contain trimmed
    # lists containing only variable definitions specific to that configuration.
    var_dict_list = [default_var_dict, arm_var_dict, arm_neon_var_dict,
                     x86_var_dict, mips_var_dict, mips64_var_dict,
                     arm64_var_dict]
    common = vars_dict_lib.intersect(var_dict_list)

    common.LOCAL_MODULE.add('libskia')

    # Create SkUserConfig
    user_config = os.path.join(SKIA_DIR, 'include', 'config', 'SkUserConfig.h')
    if target_dir:
      dst_dir = target_dir
    else:
      dst_dir = os.path.join(SKIA_DIR, 'include', 'core')

    generate_user_config.generate_user_config(
        original_sk_user_config=user_config,
        require_sk_user_config=require_sk_user_config, target_dir=dst_dir,
        ordered_set=common.DEFINES)

    # Emit one standalone makefile per tool, sharing the common definitions.
    tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                       target_file='tests.gyp',
                                       skia_trunk=target_dir,
                                       dest_dir='tests',
                                       skia_lib_var_dict=common,
                                       local_module_name='skia_test',
                                       local_module_tags=['tests'])

    tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                       target_file='bench.gyp',
                                       skia_trunk=target_dir,
                                       dest_dir='bench',
                                       skia_lib_var_dict=common,
                                       local_module_name='skia_bench',
                                       local_module_tags=['tests'],
                                       place_in_local_tmp=True)

    tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                       target_file='gm.gyp',
                                       skia_trunk=target_dir,
                                       dest_dir='gm',
                                       skia_lib_var_dict=common,
                                       local_module_name='skia_gm',
                                       local_module_tags=['tests'])

    tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                       target_file='dm.gyp',
                                       skia_trunk=target_dir,
                                       dest_dir='dm',
                                       skia_lib_var_dict=common,
                                       local_module_name='skia_dm',
                                       local_module_tags=['tests'])

    # Now that the defines have been written to SkUserConfig and they've been
    # used to skip adding them to the tools makefiles, they are not needed in
    # Android.mk. Reset DEFINES.
    common.DEFINES.reset()

    # Further trim arm_neon_var_dict with arm_var_dict. After this call,
    # arm_var_dict (which will now be the intersection) includes all definitions
    # used by both arm and arm + neon, and arm_neon_var_dict will only contain
    # those specific to arm + neon.
    arm_var_dict = vars_dict_lib.intersect([arm_var_dict, arm_neon_var_dict])

    # Now create a list of VarsDictData holding everything but common.
    deviations_from_common = []
    deviations_from_common.append(makefile_writer.VarsDictData(
        arm_var_dict, 'arm'))
    deviations_from_common.append(makefile_writer.VarsDictData(
        arm_neon_var_dict, 'arm', 'ARCH_ARM_HAVE_NEON'))
    deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
                                                               'x86'))
    # Currently, x86_64 is identical to x86
    deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
                                                               'x86_64'))

    deviations_from_common.append(makefile_writer.VarsDictData(mips_var_dict,
                                                               'mips'))

    deviations_from_common.append(makefile_writer.VarsDictData(mips64_var_dict,
                                                               'mips64'))

    deviations_from_common.append(makefile_writer.VarsDictData(arm64_var_dict,
                                                               'arm64'))

    makefile_writer.write_android_mk(target_dir=target_dir,
        common=common, deviations_from_common=deviations_from_common)

  finally:
    # Always remove the temporary gyp working copy, even on failure.
    shutil.rmtree(tmp_folder)

if __name__ == '__main__':
  main()
| bsd-3-clause |
kangbiao/tornado | tornado/test/simple_httpclient_test.py | 7 | 27084 | from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import ssl
import sys
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders, ResponseStartLine
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port, unittest
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
    """Runs the shared httpclient test suite against SimpleAsyncHTTPClient."""

    def get_http_client(self):
        # force_instance=True bypasses the per-IOLoop client cache so this
        # suite gets its own client.
        client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
                                       force_instance=True)
        self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
        return client
class TriggerHandler(RequestHandler):
    """Parks requests in a queue so the test can finish them on demand.

    Each GET appends its own finish callback to ``queue``; unless
    ``?wake=false`` is passed, ``wake_callback`` is invoked so the test's
    wait() returns.
    """

    def initialize(self, queue, wake_callback):
        self.queue = queue
        self.wake_callback = wake_callback

    @asynchronous
    def get(self):
        logging.debug("queuing trigger")
        self.queue.append(self.finish)
        should_wake = self.get_argument("wake", "true") == "true"
        if should_wake:
            self.wake_callback()
class HangHandler(RequestHandler):
    """Never finishes the request; used to exercise client-side timeouts."""

    @asynchronous
    def get(self):
        # Intentionally never calls finish(): the request hangs forever.
        pass
class ContentLengthHandler(RequestHandler):
    """Sets Content-Length verbatim from ?value=, allowing malformed values."""

    def get(self):
        self.set_header("Content-Length", self.get_argument("value"))
        self.write("ok")
class HeadHandler(RequestHandler):
    """HEAD endpoint: advertises a Content-Length but sends no body."""

    def head(self):
        self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
    """Minimal OPTIONS endpoint with a permissive CORS header."""

    def options(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        self.write("ok")
class NoContentHandler(RequestHandler):
    """Returns 204 No Content; ?error=1 illegally adds a body and length."""

    def get(self):
        wants_error = self.get_argument("error", None)
        if wants_error:
            # Deliberately malformed: a 204 response must not carry a body.
            self.set_header("Content-Length", "5")
            self.write("hello")
        self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
    """Redirects a POST to /see_other_get with the status given in the body.

    The request body selects between 302 (Found) and 303 (See Other).
    """

    def post(self):
        code = int(self.request.body)
        assert code in (302, 303), "unexpected body %r" % self.request.body
        self.set_header("Location", "/see_other_get")
        self.set_status(code)
class SeeOtherGetHandler(RequestHandler):
    """Redirect target: the follow-up GET must arrive without a body."""

    def get(self):
        if self.request.body:
            raise Exception("unexpected body %r" % self.request.body)
        self.write("ok")
class HostEchoHandler(RequestHandler):
    """Writes back the Host header the client sent."""

    def get(self):
        self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
    """Responds with a body but no Content-Length (old HTTP/1.0 style),
    forcing the client into read-until-close mode."""

    @asynchronous
    def get(self):
        if self.request.version.startswith('HTTP/1'):
            # Emulate the old HTTP/1.0 behavior of returning a body with no
            # content-length. Tornado handles content-length at the framework
            # level so we have to go around it.
            stream = self.request.connection.detach()
            stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
                         b"hello")
            stream.close()
        else:
            self.finish('HTTP/1 required')
class EchoPostHandler(RequestHandler):
    """Echoes the POST body back to the client unchanged."""

    def post(self):
        self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
    """Rejects the request in prepare(), before any body is streamed;
    used with Expect: 100-continue tests."""

    def prepare(self):
        self.set_status(403)
        self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
    """Test cases for SimpleAsyncHTTPClient, shared between the plain-HTTP
    and HTTPS concrete test cases."""

    def get_app(self):
        # callable objects to finish pending /trigger requests
        self.triggers = collections.deque()
        return Application([
            url("/trigger", TriggerHandler, dict(queue=self.triggers,
                                                 wake_callback=self.stop)),
            url("/chunk", ChunkHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/hang", HangHandler),
            url("/hello", HelloWorldHandler),
            url("/content_length", ContentLengthHandler),
            url("/head", HeadHandler),
            url("/options", OptionsHandler),
            url("/no_content", NoContentHandler),
            url("/see_other_post", SeeOtherPostHandler),
            url("/see_other_get", SeeOtherGetHandler),
            url("/host_echo", HostEchoHandler),
            url("/no_content_length", NoContentLengthHandler),
            url("/echo_post", EchoPostHandler),
            url("/respond_in_prepare", RespondInPrepareHandler),
        ], gzip=True)

    def test_singleton(self):
        # Class "constructor" reuses objects on the same IOLoop
        self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
                        SimpleAsyncHTTPClient(self.io_loop))
        # unless force_instance is used
        self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
                        SimpleAsyncHTTPClient(self.io_loop,
                                              force_instance=True))
        # different IOLoops use different objects
        with closing(IOLoop()) as io_loop2:
            self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
                            SimpleAsyncHTTPClient(io_loop2))

    def test_connection_limit(self):
        with closing(self.create_client(max_clients=2)) as client:
            self.assertEqual(client.max_clients, 2)
            seen = []
            # Send 4 requests.  Two can be sent immediately, while the others
            # will be queued
            for i in range(4):
                client.fetch(self.get_url("/trigger"),
                             lambda response, i=i: (seen.append(i), self.stop()))
            self.wait(condition=lambda: len(self.triggers) == 2)
            self.assertEqual(len(client.queue), 2)

            # Finish the first two requests and let the next two through
            self.triggers.popleft()()
            self.triggers.popleft()()
            self.wait(condition=lambda: (len(self.triggers) == 2 and
                                         len(seen) == 2))
            self.assertEqual(set(seen), set([0, 1]))
            self.assertEqual(len(client.queue), 0)

            # Finish all the pending requests
            self.triggers.popleft()()
            self.triggers.popleft()()
            self.wait(condition=lambda: len(seen) == 4)
            self.assertEqual(set(seen), set([0, 1, 2, 3]))
            self.assertEqual(len(self.triggers), 0)

    def test_redirect_connection_limit(self):
        # following redirects should not consume additional connections
        with closing(self.create_client(max_clients=1)) as client:
            client.fetch(self.get_url('/countdown/3'), self.stop,
                         max_redirects=3)
            response = self.wait()
            response.rethrow()

    def test_gzip(self):
        # All the tests in this file should be using gzip, but this test
        # ensures that it is in fact getting compressed.
        # Setting Accept-Encoding manually bypasses the client's
        # decompression so we can see the raw data.
        response = self.fetch("/chunk", use_gzip=False,
                              headers={"Accept-Encoding": "gzip"})
        self.assertEqual(response.headers["Content-Encoding"], "gzip")
        self.assertNotEqual(response.body, b"asdfqwer")
        # Our test data gets bigger when gzipped.  Oops.  :)
        self.assertEqual(len(response.body), 34)
        f = gzip.GzipFile(mode="r", fileobj=response.buffer)
        self.assertEqual(f.read(), b"asdfqwer")

    def test_max_redirects(self):
        response = self.fetch("/countdown/5", max_redirects=3)
        self.assertEqual(302, response.code)
        # We requested 5, followed three redirects for 4, 3, 2, then the last
        # unfollowed redirect is to 1.
        self.assertTrue(response.request.url.endswith("/countdown/5"))
        self.assertTrue(response.effective_url.endswith("/countdown/2"))
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))

    def test_header_reuse(self):
        # Apps may reuse a headers object if they are only passing in constant
        # headers like user-agent.  The header object should not be modified.
        headers = HTTPHeaders({'User-Agent': 'Foo'})
        self.fetch("/hello", headers=headers)
        self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])

    def test_see_other_redirect(self):
        for code in (302, 303):
            response = self.fetch("/see_other_post", method="POST", body="%d" % code)
            self.assertEqual(200, response.code)
            self.assertTrue(response.request.url.endswith("/see_other_post"))
            self.assertTrue(response.effective_url.endswith("/see_other_get"))
            # request is the original request, is a POST still
            self.assertEqual("POST", response.request.method)

    @skipOnTravis
    def test_request_timeout(self):
        timeout = 0.1
        timeout_min, timeout_max = 0.099, 0.15
        if os.name == 'nt':
            timeout = 0.5
            timeout_min, timeout_max = 0.4, 0.6

        response = self.fetch('/trigger?wake=false', request_timeout=timeout)
        self.assertEqual(response.code, 599)
        self.assertTrue(timeout_min < response.request_time < timeout_max,
                        response.request_time)
        self.assertEqual(str(response.error), "HTTP 599: Timeout")
        # trigger the hanging request to let it clean up after itself
        self.triggers.popleft()()

    @skipIfNoIPv6
    def test_ipv6(self):
        try:
            [sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
            port = sock.getsockname()[1]
            self.http_server.add_socket(sock)
        except socket.gaierror as e:
            if e.args[0] == socket.EAI_ADDRFAMILY:
                # python supports ipv6, but it's not configured on the network
                # interface, so skip this test.
                return
            raise
        url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)

        # ipv6 is currently enabled by default but can be disabled
        self.http_client.fetch(url, self.stop, allow_ipv6=False)
        response = self.wait()
        self.assertEqual(response.code, 599)

        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(response.body, b"Hello world!")

    # NOTE: deliberately disabled (x-prefix) upstream; kept as-is.
    def xtest_multiple_content_length_accepted(self):
        response = self.fetch("/content_length?value=2,2")
        self.assertEqual(response.body, b"ok")
        response = self.fetch("/content_length?value=2,%202,2")
        self.assertEqual(response.body, b"ok")

        response = self.fetch("/content_length?value=2,4")
        self.assertEqual(response.code, 599)
        response = self.fetch("/content_length?value=2,%202,3")
        self.assertEqual(response.code, 599)

    def test_head_request(self):
        response = self.fetch("/head", method="HEAD")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["content-length"], "7")
        self.assertFalse(response.body)

    def test_options_request(self):
        response = self.fetch("/options", method="OPTIONS")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["content-length"], "2")
        self.assertEqual(response.headers["access-control-allow-origin"], "*")
        self.assertEqual(response.body, b"ok")

    def test_no_content(self):
        response = self.fetch("/no_content")
        self.assertEqual(response.code, 204)
        # 204 status doesn't need a content-length, but tornado will
        # add a zero content-length anyway.
        #
        # A test without a content-length header is included below
        # in HTTP204NoContentTestCase.
        self.assertEqual(response.headers["Content-length"], "0")

        # 204 status with non-zero content length is malformed
        with ExpectLog(gen_log, "Malformed HTTP message"):
            response = self.fetch("/no_content?error=1")
        self.assertEqual(response.code, 599)

    def test_host_header(self):
        host_re = re.compile(b"^localhost:[0-9]+$")
        response = self.fetch("/host_echo")
        self.assertTrue(host_re.match(response.body))

        url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertTrue(host_re.match(response.body), response.body)

    def test_connection_refused(self):
        cleanup_func, port = refusing_port()
        self.addCleanup(cleanup_func)
        with ExpectLog(gen_log, ".*", required=False):
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            response = self.wait()
        self.assertEqual(599, response.code)

        if sys.platform != 'cygwin':
            # cygwin returns EPERM instead of ECONNREFUSED here
            contains_errno = str(errno.ECONNREFUSED) in str(response.error)
            if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
                contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
            self.assertTrue(contains_errno, response.error)
            # This is usually "Connection refused".
            # On windows, strerror is broken and returns "Unknown error".
            expected_message = os.strerror(errno.ECONNREFUSED)
            self.assertTrue(expected_message in str(response.error),
                            response.error)

    def test_queue_timeout(self):
        with closing(self.create_client(max_clients=1)) as client:
            client.fetch(self.get_url('/trigger'), self.stop,
                         request_timeout=10)
            # Wait for the trigger request to block, not complete.
            self.wait()
            client.fetch(self.get_url('/hello'), self.stop,
                         connect_timeout=0.1)
            response = self.wait()

            self.assertEqual(response.code, 599)
            self.assertTrue(response.request_time < 1, response.request_time)
            self.assertEqual(str(response.error), "HTTP 599: Timeout")
            self.triggers.popleft()()
            self.wait()

    def test_no_content_length(self):
        response = self.fetch("/no_content_length")
        if response.body == b"HTTP/1 required":
            self.skipTest("requires HTTP/1.x")
        else:
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(b"hello", response.body)

    def sync_body_producer(self, write):
        write(b'1234')
        write(b'5678')

    @gen.coroutine
    def async_body_producer(self, write):
        yield write(b'1234')
        yield gen.Task(IOLoop.current().add_callback)
        yield write(b'5678')

    def test_sync_body_producer_chunked(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.sync_body_producer)
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_sync_body_producer_content_length(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.sync_body_producer,
                              headers={'Content-Length': '8'})
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_async_body_producer_chunked(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.async_body_producer)
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_async_body_producer_content_length(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.async_body_producer,
                              headers={'Content-Length': '8'})
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_100_continue(self):
        response = self.fetch("/echo_post", method="POST",
                              body=b"1234",
                              expect_100_continue=True)
        self.assertEqual(response.body, b"1234")

    def test_100_continue_early_response(self):
        def body_producer(write):
            raise Exception("should not be called")
        response = self.fetch("/respond_in_prepare", method="POST",
                              body_producer=body_producer,
                              expect_100_continue=True)
        self.assertEqual(response.code, 403)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
    """Runs the mixin's tests over plain HTTP."""

    def setUp(self):
        super(SimpleHTTPClientTestCase, self).setUp()
        self.http_client = self.create_client()

    def create_client(self, **kwargs):
        # force_instance=True: keep this client out of the shared cache.
        return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
                                     **kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
    """Runs the mixin's tests over HTTPS, plus SSL-specific cases."""

    def setUp(self):
        super(SimpleHTTPSClientTestCase, self).setUp()
        self.http_client = self.create_client()

    def create_client(self, **kwargs):
        # validate_cert=False because the test server uses a self-signed cert.
        return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
                                     defaults=dict(validate_cert=False),
                                     **kwargs)

    def test_ssl_options(self):
        resp = self.fetch("/hello", ssl_options={})
        self.assertEqual(resp.body, b"Hello world!")

    @unittest.skipIf(not hasattr(ssl, 'SSLContext'),
                     'ssl.SSLContext not present')
    def test_ssl_context(self):
        resp = self.fetch("/hello",
                          ssl_options=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
        self.assertEqual(resp.body, b"Hello world!")

    def test_ssl_options_handshake_fail(self):
        # Requiring cert verification with no CA configured must fail.
        with ExpectLog(gen_log, "SSL Error|Uncaught exception",
                       required=False):
            resp = self.fetch(
                "/hello", ssl_options=dict(cert_reqs=ssl.CERT_REQUIRED))
        self.assertRaises(ssl.SSLError, resp.rethrow)

    @unittest.skipIf(not hasattr(ssl, 'SSLContext'),
                     'ssl.SSLContext not present')
    def test_ssl_context_handshake_fail(self):
        with ExpectLog(gen_log, "SSL Error|Uncaught exception"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            resp = self.fetch("/hello", ssl_options=ctx)
        self.assertRaises(ssl.SSLError, resp.rethrow)

    def test_error_logging(self):
        # No stack traces are logged for SSL errors (in this case,
        # failure to validate the testing self-signed cert).
        # The SSLError is exposed through ssl.SSLError.
        with ExpectLog(gen_log, '.*') as expect_log:
            response = self.fetch("/", validate_cert=True)
            self.assertEqual(response.code, 599)
            self.assertIsInstance(response.error, ssl.SSLError)
        self.assertFalse(expect_log.logged_stack)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
    """Tests the AsyncHTTPClient.configure() machinery for max_clients."""

    def setUp(self):
        super(CreateAsyncHTTPClientTestCase, self).setUp()
        # Save the global configuration so tests don't leak into each other.
        self.saved = AsyncHTTPClient._save_configuration()

    def tearDown(self):
        AsyncHTTPClient._restore_configuration(self.saved)
        super(CreateAsyncHTTPClientTestCase, self).tearDown()

    def test_max_clients(self):
        AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
        with closing(AsyncHTTPClient(
                self.io_loop, force_instance=True)) as client:
            self.assertEqual(client.max_clients, 10)
        with closing(AsyncHTTPClient(
                self.io_loop, max_clients=11, force_instance=True)) as client:
            self.assertEqual(client.max_clients, 11)

        # Now configure max_clients statically and try overriding it
        # with each way max_clients can be passed
        AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
        with closing(AsyncHTTPClient(
                self.io_loop, force_instance=True)) as client:
            self.assertEqual(client.max_clients, 12)
        with closing(AsyncHTTPClient(
                self.io_loop, max_clients=13, force_instance=True)) as client:
            self.assertEqual(client.max_clients, 13)
        with closing(AsyncHTTPClient(
                self.io_loop, max_clients=14, force_instance=True)) as client:
            self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
    """Verifies the client skips an interim '100 CONTINUE' status line
    and reads the real response that follows."""

    def respond_100(self, request):
        self.http1 = request.version.startswith('HTTP/1.')
        if not self.http1:
            # 100-continue is an HTTP/1.x concept; bail out cleanly otherwise.
            request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
                                             HTTPHeaders())
            request.connection.finish()
            return
        self.request = request
        self.request.connection.stream.write(
            b"HTTP/1.1 100 CONTINUE\r\n\r\n",
            self.respond_200)

    def respond_200(self):
        self.request.connection.stream.write(
            b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
            self.request.connection.stream.close)

    def get_app(self):
        # Not a full Application, but works as an HTTPServer callback
        return self.respond_100

    def test_100_continue(self):
        res = self.fetch('/')
        if not self.http1:
            self.skipTest("requires HTTP/1.x")
        self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
    """Verifies a 204 response without a Content-Length header is not
    treated as read-until-close."""

    def respond_204(self, request):
        self.http1 = request.version.startswith('HTTP/1.')
        if not self.http1:
            # Close the request cleanly in HTTP/2; it will be skipped anyway.
            request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
                                             HTTPHeaders())
            request.connection.finish()
            return
        # A 204 response never has a body, even if doesn't have a content-length
        # (which would otherwise mean read-until-close).  Tornado always
        # sends a content-length, so we simulate here a server that sends
        # no content length and does not close the connection.
        #
        # Tests of a 204 response with a Content-Length header are included
        # in SimpleHTTPClientTestMixin.
        stream = request.connection.detach()
        stream.write(
            b"HTTP/1.1 204 No content\r\n\r\n")
        stream.close()

    def get_app(self):
        return self.respond_204

    def test_204_no_content(self):
        resp = self.fetch('/')
        if not self.http1:
            self.skipTest("requires HTTP/1.x")
        self.assertEqual(resp.code, 204)
        self.assertEqual(resp.body, b'')
class HostnameMappingTestCase(AsyncHTTPTestCase):
    """Exercises the hostname_mapping option: hosts (and host/port pairs)
    are rewritten before the connection is made."""

    def setUp(self):
        super(HostnameMappingTestCase, self).setUp()
        self.http_client = SimpleAsyncHTTPClient(
            self.io_loop,
            hostname_mapping={
                'www.example.com': '127.0.0.1',
                ('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
            })

    def get_app(self):
        return Application([url("/hello", HelloWorldHandler), ])

    def test_hostname_mapping(self):
        self.http_client.fetch(
            'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
        response = self.wait()
        response.rethrow()
        self.assertEqual(response.body, b'Hello world!')

    def test_port_mapping(self):
        self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
        response = self.wait()
        response.rethrow()
        self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
    """A DNS resolver that never calls back must trip connect_timeout."""

    def setUp(self):
        # Dummy Resolver subclass that never invokes its callback.
        class BadResolver(Resolver):
            def resolve(self, *args, **kwargs):
                pass

        super(ResolveTimeoutTestCase, self).setUp()
        self.http_client = SimpleAsyncHTTPClient(
            self.io_loop,
            resolver=BadResolver())

    def get_app(self):
        return Application([url("/hello", HelloWorldHandler), ])

    def test_resolve_timeout(self):
        response = self.fetch('/hello', connect_timeout=0.1)
        self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
    """Responses whose headers exceed max_header_size must error out."""

    def get_app(self):
        class SmallHeaders(RequestHandler):
            def get(self):
                self.set_header("X-Filler", "a" * 100)
                self.write("ok")

        class LargeHeaders(RequestHandler):
            def get(self):
                # 1000-byte header exceeds the 1024-byte total header limit.
                self.set_header("X-Filler", "a" * 1000)
                self.write("ok")

        return Application([('/small', SmallHeaders),
                            ('/large', LargeHeaders)])

    def get_http_client(self):
        return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024)

    def test_small_headers(self):
        response = self.fetch('/small')
        response.rethrow()
        self.assertEqual(response.body, b'ok')

    def test_large_headers(self):
        with ExpectLog(gen_log, "Unsatisfiable read"):
            response = self.fetch('/large')
        self.assertEqual(response.code, 599)
class MaxBodySizeTest(AsyncHTTPTestCase):
    """Responses whose body exceeds max_body_size must error out."""

    def get_app(self):
        class SmallBody(RequestHandler):
            def get(self):
                self.write("a"*1024*64)

        class LargeBody(RequestHandler):
            def get(self):
                self.write("a"*1024*100)

        return Application([('/small', SmallBody),
                            ('/large', LargeBody)])

    def get_http_client(self):
        return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_body_size=1024*64)

    def test_small_body(self):
        response = self.fetch('/small')
        response.rethrow()
        self.assertEqual(response.body, b'a'*1024*64)

    def test_large_body(self):
        with ExpectLog(gen_log, "Malformed HTTP message from None: Content-Length too long"):
            response = self.fetch('/large')
        self.assertEqual(response.code, 599)
class MaxBufferSizeTest(AsyncHTTPTestCase):
    """A body larger than the stream buffer (but under max_body_size)
    must still be readable via streaming."""

    def get_app(self):
        class LargeBody(RequestHandler):
            def get(self):
                self.write("a"*1024*100)

        return Application([('/large', LargeBody)])

    def get_http_client(self):
        # 100KB body with 64KB buffer
        return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_body_size=1024*100, max_buffer_size=1024*64)

    def test_large_body(self):
        response = self.fetch('/large')
        response.rethrow()
        self.assertEqual(response.body, b'a'*1024*100)
| apache-2.0 |
ryandvill/flask | scripts/test_import_migration.py | 141 | 2281 | # Tester for the flaskext_migrate.py module located in flask/scripts/
#
# Author: Keyan Pishdadian
import pytest
from redbaron import RedBaron
import flaskext_migrate as migrate
def test_simple_from_import():
    """A bare flask.ext import becomes an aliased flask_* import."""
    tree = RedBaron("from flask.ext import foo")
    assert migrate.fix_tester(tree) == "import flask_foo as foo"
def test_from_to_from_import():
    """from flask.ext.foo import bar is rewritten against flask_foo."""
    tree = RedBaron("from flask.ext.foo import bar")
    assert migrate.fix_tester(tree) == "from flask_foo import bar as bar"
def test_multiple_import():
    """Multiple names in one flask.ext import are all preserved."""
    tree = RedBaron("from flask.ext.foo import bar, foobar, something")
    assert migrate.fix_tester(tree) == "from flask_foo import bar, foobar, something"
def test_multiline_import():
    # The backslash continuations (and their leading whitespace) are part of
    # the source string being migrated; the fixer should still collapse the
    # import onto one line.
    red = RedBaron("from flask.ext.foo import \
                   bar,\
                   foobar,\
                   something")
    output = migrate.fix_tester(red)
    assert output == "from flask_foo import bar, foobar, something"
def test_module_import():
    """A plain module import of flask.ext.foo becomes import flask_foo."""
    tree = RedBaron("import flask.ext.foo")
    assert migrate.fix_tester(tree) == "import flask_foo"
def test_named_module_import():
    """An aliased module import keeps its alias after migration."""
    tree = RedBaron("import flask.ext.foo as foobar")
    assert migrate.fix_tester(tree) == "import flask_foo as foobar"
def test_named_from_import():
    """An aliased from-import keeps its alias after migration."""
    tree = RedBaron("from flask.ext.foo import bar as baz")
    assert migrate.fix_tester(tree) == "from flask_foo import bar as baz"
def test_parens_import():
    """Parenthesized import lists survive the rewrite unchanged."""
    tree = RedBaron("from flask.ext.foo import (bar, foo, foobar)")
    assert migrate.fix_tester(tree) == "from flask_foo import (bar, foo, foobar)"
def test_function_call_migration():
    """Call sites through flask.ext.foo are rewritten to flask_foo."""
    tree = RedBaron("flask.ext.foo(var)")
    assert migrate.fix_tester(tree) == "flask_foo(var)"
def test_nested_function_call_migration():
    """Attribute access through flask.ext.foo is rewritten everywhere."""
    tree = RedBaron("import flask.ext.foo\n\n"
                    "flask.ext.foo.bar(var)")
    assert migrate.fix_tester(tree) == ("import flask_foo\n\n"
                                        "flask_foo.bar(var)")
def test_no_change_to_import():
    """Imports that do not touch flask.ext are left untouched."""
    tree = RedBaron("from flask import Flask")
    assert migrate.fix_tester(tree) == "from flask import Flask"
| bsd-3-clause |
hpi-xnor/BMXNet | example/speech-demo/io_func/feat_readers/reader_htk.py | 25 | 2240 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy
import stats
from common import *
class htkReader(BaseReader):
    """Reader for feature files in HTK binary format.

    An HTK file starts with a 12-byte header (numSamples, sampPeriod,
    sampSize, sampKind) followed by numSamples frames, each sampSize
    bytes of float32 values.
    """

    def __init__(self, featureFile, labelFile, byteOrder=None):
        BaseReader.__init__(self, featureFile, labelFile, byteOrder)

    def Read(self):
        """Read the whole feature file.

        Returns a tuple (samples, labels): samples is a
        (numSamples, sampSize // 4) float32 array, labels is None when no
        label file was configured.
        """
        with open(self.featureFile, "rb") as f:
            # HTK data may be big- or little-endian; honour self.byteOrder.
            byte_prefix = '>' if self.byteOrder == ByteOrder.BigEndian else '<'
            dt = numpy.dtype([('numSamples', (numpy.int32, 1)),
                              ('sampPeriod', (numpy.int32, 1)),
                              ('sampSize', (numpy.int16, 1)),
                              ('sampKind', (numpy.int16, 1))])
            header = numpy.fromfile(f, dt.newbyteorder(byte_prefix), count=1)
            numSamples = header[0]['numSamples']
            sampSize = header[0]['sampSize']

            # sampSize is in bytes and every value is a 4-byte float32.
            # Integer division keeps the count an int under Python 3
            # semantics as well (plain '/' would yield a float there).
            dt = numpy.dtype([('sample', (numpy.float32, sampSize // 4))])
            samples = numpy.fromfile(f, dt.newbyteorder(byte_prefix),
                                     count=numSamples)

        self._markDone()

        if self.labelFile is None:
            labels = None
        else:
            labels = ReadLabel(self.labelFile)

        return samples[:]['sample'], labels
| apache-2.0 |
potsmaster/cinder | cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py | 19 | 22944 | # (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Cisco Zone Driver is responsible to manage access control using FC zoning
for Cisco FC fabrics.
This is a concrete implementation of FCZoneDriver interface implementing
add_connection and delete_connection interfaces.
**Related Flags**
:zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True
:zone_name_prefix: Used by: class: 'FCZoneDriver'. Defaults to 'openstack'
"""
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts
from cinder.zonemanager.drivers import fc_zone_driver
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)

# Driver-level options: which southbound CLI client class performs the
# actual zoning operations on the switch.
cisco_opts = [
    cfg.StrOpt('cisco_sb_connector',
               default='cinder.zonemanager.drivers.cisco'
               '.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI',
               help='Southbound connector for zoning operation'),
]

CONF = cfg.CONF
# Registered under the [fc-zone-manager] section of cinder.conf.
CONF.register_opts(cisco_opts, 'fc-zone-manager')
class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver):
    """Cisco FC zone driver implementation.

    OpenStack Fibre Channel zone driver to manage FC zoning in
    Cisco SAN fabrics.

    Version history:
        1.0 - Initial Cisco FC zone driver
    """

    VERSION = "1.0.0"

    def __init__(self, **kwargs):
        super(CiscoFCZoneDriver, self).__init__(**kwargs)
        self.configuration = kwargs.get('configuration', None)
        if self.configuration:
            self.configuration.append_config_values(cisco_opts)

            # Adding a hack to handle parameters from super classes
            # in case configured with multi backends.
            fabric_names = self.configuration.safe_get('fc_fabric_names')
            activate = self.configuration.safe_get('cisco_zone_activate')
            prefix = self.configuration.safe_get('cisco_zone_name_prefix')
            # Register fallback definitions only for options not already
            # provided by the configuration object.
            base_san_opts = []
            if not fabric_names:
                base_san_opts.append(
                    cfg.StrOpt('fc_fabric_names', default=None,
                               help='Comma separated list of fibre channel '
                               'fabric names. This list of names is used to'
                               ' retrieve other SAN credentials for connecting'
                               ' to each SAN fabric'
                               ))
            if not activate:
                base_san_opts.append(
                    cfg.BoolOpt('cisco_zone_activate',
                                default=True,
                                help='Indicates whether zone should '
                                     'be activated or not'))
            if not prefix:
                base_san_opts.append(
                    cfg.StrOpt('cisco_zone_name_prefix',
                               default="openstack",
                               help="A prefix to be used when naming zone"))
            if len(base_san_opts) > 0:
                CONF.register_opts(base_san_opts)
                self.configuration.append_config_values(base_san_opts)

            fabric_names = [x.strip() for x in self.
                            configuration.fc_fabric_names.split(',')]

            # There can be more than one SAN in the network and we need to
            # get credentials for each SAN.
            if fabric_names:
                self.fabric_configs = fabric_opts.load_fabric_configurations(
                    fabric_names)
    @lockutils.synchronized('cisco', 'fcfabric-', True)
    def add_connection(self, fabric, initiator_target_map):
        """Concrete implementation of add_connection.

        Based on zoning policy and state of each I-T pair, list of zone
        members are created and pushed to the fabric to add zones. The
        new zones created or zones updated are activated based on isActivate
        flag set in cinder.conf returned by volume driver after attach
        operation.

        :param fabric: Fabric name from cinder.conf file
        :param initiator_target_map: Mapping of initiator to list of targets
        """
        LOG.debug("Add connection for Fabric: %s", fabric)
        LOG.info(_LI("CiscoFCZoneDriver - Add connection "
                     "for I-T map: %s"), initiator_target_map)
        # Per-fabric connection credentials were loaded in __init__ via
        # load_fabric_configurations().
        fabric_ip = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_address')
        fabric_user = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_user')
        fabric_pwd = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_password')
        fabric_port = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_port')

        # A per-fabric zoning policy overrides the global one.
        zoning_policy = self.configuration.zoning_policy
        zoning_policy_fab = self.fabric_configs[fabric].safe_get(
            'cisco_zoning_policy')
        if zoning_policy_fab:
            zoning_policy = zoning_policy_fab

        zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')

        LOG.info(_LI("Zoning policy for Fabric %s"), zoning_policy)

        statusmap_from_fabric = self.get_zoning_status(
            fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)

        # Only proceed when no other zoning session is open on the VSAN.
        if statusmap_from_fabric.get('session') == 'none':

            cfgmap_from_fabric = self.get_active_zone_set(
                fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
            zone_names = []
            if cfgmap_from_fabric.get('zones'):
                zone_names = cfgmap_from_fabric['zones'].keys()
            # based on zoning policy, create zone member list and
            # push changes to fabric.
            for initiator_key in initiator_target_map.keys():
                zone_map = {}
                initiator = initiator_key.lower()
                t_list = initiator_target_map[initiator_key]
                if zoning_policy == 'initiator-target':
                    # One zone per initiator-target pair.
                    for t in t_list:
                        target = t.lower()
                        zone_members = [
                            zm_utils.get_formatted_wwn(initiator),
                            zm_utils.get_formatted_wwn(target)]
                        zone_name = (self.
                                     configuration.cisco_zone_name_prefix
                                     + initiator.replace(':', '')
                                     + target.replace(':', ''))
                        if (len(cfgmap_from_fabric) == 0 or (
                                zone_name not in zone_names)):
                            zone_map[zone_name] = zone_members
                        else:
                            # This is I-T zoning, skip if zone exists.
                            LOG.info(_LI("Zone exists in I-T mode. "
                                         "Skipping zone creation %s"),
                                     zone_name)
                elif zoning_policy == 'initiator':
                    # One zone per initiator, containing all its targets.
                    zone_members = [
                        zm_utils.get_formatted_wwn(initiator)]
                    for t in t_list:
                        target = t.lower()
                        zone_members.append(
                            zm_utils.get_formatted_wwn(target))

                    zone_name = self.configuration.cisco_zone_name_prefix \
                        + initiator.replace(':', '')

                    if len(zone_names) > 0 and (zone_name in zone_names):
                        # Zone already exists: append only the members that
                        # are not already part of it.
                        zone_members = zone_members + filter(
                            lambda x: x not in zone_members,
                            cfgmap_from_fabric['zones'][zone_name])
                    zone_map[zone_name] = zone_members
                else:
                    msg = _("Zoning Policy: %s, not"
                            " recognized") % zoning_policy
                    LOG.error(msg)
                    raise exception.FCZoneDriverException(msg)

                LOG.info(_LI("Zone map to add: %s"), zone_map)

                if len(zone_map) > 0:
                    conn = None
                    try:
                        conn = importutils.import_object(
                            self.configuration.cisco_sb_connector,
                            ipaddress=fabric_ip,
                            username=fabric_user,
                            password=fabric_pwd,
                            port=fabric_port,
                            vsan=zoning_vsan)
                        conn.add_zones(
                            zone_map, self.configuration.cisco_zone_activate,
                            zoning_vsan, cfgmap_from_fabric,
                            statusmap_from_fabric)
                        conn.cleanup()
                    except exception.CiscoZoningCliException as cisco_ex:
                        msg = _("Exception: %s") % six.text_type(cisco_ex)
                        raise exception.FCZoneDriverException(msg)
                    except Exception:
                        msg = _("Failed to add zoning configuration.")
                        LOG.exception(msg)
                        raise exception.FCZoneDriverException(msg)
                    LOG.debug("Zones added successfully: %s", zone_map)
        else:
            LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
    @lockutils.synchronized('cisco', 'fcfabric-', True)
    def delete_connection(self, fabric, initiator_target_map):
        """Concrete implementation of delete_connection.

        Based on zoning policy and state of each I-T pair, list of zones
        are created for deletion. The zones are either updated deleted based
        on the policy and attach/detach state of each I-T pair.

        :param fabric: Fabric name from cinder.conf file
        :param initiator_target_map: Mapping of initiator to list of targets
        """
        LOG.debug("Delete connection for fabric: %s", fabric)
        LOG.info(_LI("CiscoFCZoneDriver - Delete connection for I-T map: %s"),
                 initiator_target_map)
        # Per-fabric credentials, as in add_connection.
        fabric_ip = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_address')
        fabric_user = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_user')
        fabric_pwd = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_password')
        fabric_port = self.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_port')

        # A per-fabric zoning policy overrides the global one.
        zoning_policy = self.configuration.zoning_policy
        zoning_policy_fab = self.fabric_configs[fabric].safe_get(
            'cisco_zoning_policy')
        if zoning_policy_fab:
            zoning_policy = zoning_policy_fab

        zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')

        LOG.info(_LI("Zoning policy for fabric %s"), zoning_policy)

        statusmap_from_fabric = self.get_zoning_status(
            fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)

        # Only proceed when no other zoning session is open on the VSAN.
        if statusmap_from_fabric.get('session') == 'none':
            cfgmap_from_fabric = self.get_active_zone_set(
                fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
            zone_names = []
            if cfgmap_from_fabric.get('zones'):
                zone_names = cfgmap_from_fabric['zones'].keys()

            # Based on zoning policy, get zone member list and push
            # changes to fabric. This operation could result in an update
            # for zone config with new member list or deleting zones from
            # active cfg.
            LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric)
            for initiator_key in initiator_target_map.keys():
                initiator = initiator_key.lower()
                formatted_initiator = zm_utils.get_formatted_wwn(initiator)
                zone_map = {}
                zones_to_delete = []
                t_list = initiator_target_map[initiator_key]
                if zoning_policy == 'initiator-target':
                    # In this case, zone needs to be deleted.
                    for t in t_list:
                        target = t.lower()
                        zone_name = (
                            self.configuration.cisco_zone_name_prefix
                            + initiator.replace(':', '')
                            + target.replace(':', ''))
                        LOG.debug("Zone name to del: %s", zone_name)
                        if (len(zone_names) > 0 and (zone_name in zone_names)):
                            # delete zone.
                            LOG.debug("Added zone to delete to list: %s",
                                      zone_name)
                            zones_to_delete.append(zone_name)
                elif zoning_policy == 'initiator':
                    zone_members = [formatted_initiator]
                    for t in t_list:
                        target = t.lower()
                        zone_members.append(
                            zm_utils.get_formatted_wwn(target))

                    zone_name = self.configuration.cisco_zone_name_prefix \
                        + initiator.replace(':', '')

                    if (zone_names and (zone_name in zone_names)):
                        filtered_members = filter(
                            lambda x: x not in zone_members,
                            cfgmap_from_fabric['zones'][zone_name])

                        # The assumption here is that initiator is always
                        # there in the zone as it is 'initiator' policy.
                        # We find the filtered list and if it is non-empty,
                        # add initiator to it and update zone if filtered
                        # list is empty, we remove that zone.
                        LOG.debug("Zone delete - I mode: filtered targets: %s",
                                  filtered_members)
                        if filtered_members:
                            filtered_members.append(formatted_initiator)
                            LOG.debug("Filtered zone members to update: %s",
                                      filtered_members)
                            zone_map[zone_name] = filtered_members
                            LOG.debug("Filtered zone Map to update: %s",
                                      zone_map)
                        else:
                            zones_to_delete.append(zone_name)
                else:
                    LOG.info(_LI("Zoning Policy: %s, not recognized"),
                             zoning_policy)

                LOG.debug("Final Zone map to update: %s", zone_map)
                LOG.debug("Final Zone list to delete: %s", zones_to_delete)
                conn = None
                try:
                    conn = importutils.import_object(
                        self.configuration.cisco_sb_connector,
                        ipaddress=fabric_ip,
                        username=fabric_user,
                        password=fabric_pwd,
                        port=fabric_port,
                        vsan=zoning_vsan)
                    # Update zone membership.
                    if zone_map:
                        conn.add_zones(
                            zone_map, self.configuration.cisco_zone_activate,
                            zoning_vsan, cfgmap_from_fabric,
                            statusmap_from_fabric)
                    # Delete zones ~sk.
                    if zones_to_delete:
                        # delete_zones expects a single ';'-separated string
                        # of zone names.
                        zone_name_string = ''
                        num_zones = len(zones_to_delete)
                        for i in range(0, num_zones):
                            if i == 0:
                                zone_name_string = ('%s%s' % (
                                    zone_name_string,
                                    zones_to_delete[i]))
                            else:
                                zone_name_string = ('%s%s%s' % (
                                    zone_name_string, ';',
                                    zones_to_delete[i]))

                        conn.delete_zones(zone_name_string,
                                          self.configuration.
                                          cisco_zone_activate,
                                          zoning_vsan, cfgmap_from_fabric,
                                          statusmap_from_fabric)
                    conn.cleanup()
                except Exception:
                    msg = _("Failed to update or delete zoning configuration")
                    LOG.exception(msg)
                    raise exception.FCZoneDriverException(msg)
                LOG.debug("Zones deleted successfully: %s", zone_map)
        else:
            LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
    def get_san_context(self, target_wwn_list):
        """Lookup SAN context for visible end devices.

        Look up each SAN configured and return a map of SAN (fabric IP) to
        list of target WWNs visible to the fabric.

        :param target_wwn_list: list of target WWNs to look for
        :returns: dict mapping fabric name to the list of visible target
                  WWNs (with ':' separators stripped)
        """
        formatted_target_list = []
        fabric_map = {}
        fabrics = [x.strip() for x in self.
                   configuration.fc_fabric_names.split(',')]
        LOG.debug("Fabric List: %s", fabrics)
        LOG.debug("Target wwn List: %s", target_wwn_list)
        if len(fabrics) > 0:
            # Normalize requested WWNs once, so membership tests against the
            # fabric name-server output compare like with like.
            for t in target_wwn_list:
                formatted_target_list.append(
                    zm_utils.get_formatted_wwn(t.lower()))
            LOG.debug("Formatted Target wwn List: %s", formatted_target_list)
            for fabric_name in fabrics:
                fabric_ip = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_address')
                fabric_user = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_user')
                fabric_pwd = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_password')
                fabric_port = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_port')
                zoning_vsan = self.fabric_configs[fabric_name].safe_get(
                    'cisco_zoning_vsan')

                # Get name server data from fabric and get the targets
                # logged in.
                nsinfo = None
                try:
                    conn = importutils.import_object(
                        self.configuration.cisco_sb_connector,
                        ipaddress=fabric_ip,
                        username=fabric_user,
                        password=fabric_pwd, port=fabric_port,
                        vsan=zoning_vsan)
                    nsinfo = conn.get_nameserver_info()
                    LOG.debug("show fcns database info from fabric: %s",
                              nsinfo)
                    conn.cleanup()
                except exception.CiscoZoningCliException:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_LE("Error getting show fcns database "
                                          "info."))
                except Exception:
                    msg = _("Failed to get show fcns database info.")
                    LOG.exception(msg)
                    raise exception.FCZoneDriverException(msg)
                visible_targets = filter(
                    lambda x: x in formatted_target_list, nsinfo)

                if visible_targets:
                    LOG.info(_LI("Filtered targets for SAN is: %s"),
                             {fabric_name: visible_targets})
                    # getting rid of the ':' before returning
                    for idx, elem in enumerate(visible_targets):
                        visible_targets[idx] = six.text_type(
                            visible_targets[idx]).replace(':', '')
                    fabric_map[fabric_name] = visible_targets
                else:
                    LOG.debug("No targets are in the fcns info for SAN %s",
                              fabric_name)
        LOG.debug("Return SAN context output: %s", fabric_map)
        return fabric_map
def get_active_zone_set(self, fabric_ip,
fabric_user, fabric_pwd, fabric_port,
zoning_vsan):
"""Gets active zoneset config for vsan."""
cfgmap = {}
conn = None
try:
LOG.debug("Southbound connector: %s",
self.configuration.cisco_sb_connector)
conn = importutils.import_object(
self.configuration.cisco_sb_connector,
ipaddress=fabric_ip, username=fabric_user,
password=fabric_pwd, port=fabric_port, vsan=zoning_vsan)
cfgmap = conn.get_active_zone_set()
conn.cleanup()
except Exception:
msg = _("Failed to access active zoning configuration.")
LOG.exception(msg)
raise exception.FCZoneDriverException(msg)
LOG.debug("Active zone set from fabric: %s", cfgmap)
return cfgmap
def get_zoning_status(self, fabric_ip, fabric_user, fabric_pwd,
fabric_port, zoning_vsan):
"""Gets zoneset status and mode."""
statusmap = {}
conn = None
try:
LOG.debug("Southbound connector: %s",
self.configuration.cisco_sb_connector)
conn = importutils.import_object(
self.configuration.cisco_sb_connector,
ipaddress=fabric_ip, username=fabric_user,
password=fabric_pwd, port=fabric_port, vsan=zoning_vsan)
statusmap = conn.get_zoning_status()
conn.cleanup()
except Exception:
msg = _("Failed to access zoneset status:%s")
LOG.exception(msg)
raise exception.FCZoneDriverException(msg)
LOG.debug("Zoneset status from fabric: %s", statusmap)
return statusmap
| apache-2.0 |
w1kke/hyperopt | hyperopt/pyll/tests/test_stochastic.py | 7 | 2027 | import numpy as np
from hyperopt.pyll import scope, as_apply, dfs, rec_eval
from hyperopt.pyll.stochastic import *
def test_recursive_set_rng_kwarg():
    # Attach one shared RNG to every stochastic node in the graph, then
    # evaluate it and check each draw lands in its (low, high) interval.
    uniform = scope.uniform
    a = as_apply([uniform(0, 1), uniform(2, 3)])
    rng = np.random.RandomState(234)
    recursive_set_rng_kwarg(a, rng)
    print a
    val_a = rec_eval(a)
    assert 0 < val_a[0] < 1
    assert 2 < val_a[1] < 3
def test_lnorm():
    # Smoke test: build a nested stochastic spec (as used for lnorm layer
    # hyperparameters) and make sure constructing/printing it does not raise.
    G = scope
    choice = G.choice
    uniform = G.uniform
    quantized_uniform = G.quniform

    inker_size = quantized_uniform(low=0, high=7.99, q=2) + 3
    # -- test that it runs
    lnorm = as_apply({'kwargs': {'inker_shape': (inker_size, inker_size),
                                 'outker_shape': (inker_size, inker_size),
                                 'remove_mean': choice([0, 1]),
                                 'stretch': uniform(low=0, high=10),
                                 'threshold': uniform(
                                     low=.1 / np.sqrt(10.),
                                     high=10 * np.sqrt(10))
                                 }})
    print lnorm
    print 'len', len(str(lnorm))
    # not sure what to assert
    # ... this is too fragile
    # assert len(str(lnorm)) == 980
def test_sample_deterministic():
    # A spec with no stochastic nodes must always sample to the same value,
    # regardless of the RNG passed in.
    aa = as_apply([0, 1])
    print aa
    dd = sample(aa, np.random.RandomState(3))
    assert dd == (0, 1)
def test_repeatable():
    """Sampling is a pure function of the spec and the RNG seed."""
    shared = scope.uniform(0, 1)
    spec = as_apply(dict(
        u=shared,
        n=scope.normal(5, 0.1),
        l=[0, 1, scope.one_of(2, 3), shared]))
    first = sample(spec, np.random.RandomState(3))
    second = sample(spec, np.random.RandomState(3))
    other_seed = sample(spec, np.random.RandomState(4))
    # Same seed -> identical draw; different seed -> different draw.
    assert first == second
    assert first != other_seed
def test_sample():
    # Draw one sample from a mixed spec and sanity-check every component.
    u = scope.uniform(0, 1)
    aa = as_apply(dict(
        u=u,
        n=scope.normal(5, 0.1),
        l=[0, 1, scope.one_of(2, 3), u]))
    print aa
    dd = sample(aa, np.random.RandomState(3))
    assert 0 < dd['u'] < 1
    # normal(5, 0.1): a draw outside (4, 6) would be ~10 sigma away.
    assert 4 < dd['n'] < 6
    # The shared node 'u' must sample to the same value everywhere it appears.
    assert dd['u'] == dd['l'][3]
    assert dd['l'][:2] == (0, 1)
    assert dd['l'][2] in (2, 3)
| bsd-3-clause |
wtsi-hgi/serapis | serapis/domain/models/identifiers.py | 1 | 2862 | #################################################################################
#
# Copyright (c) 2013 Genome Research Ltd.
#
# Author: Irina Colgiu <ic4@sanger.ac.uk>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
import re
from serapis.com import wrappers
class EntityIdentifier(object):
    """Heuristics for classifying an entity identifier value.

    An identifier is classified as an accession number, an internal
    (numeric) id, or a plain name; ``guess_identifier_type`` picks the
    most likely of the three.
    """

    @classmethod
    @wrappers.check_args_not_none
    def _is_accession_nr(cls, field):
        """
        The ENA accession numbers all start with: ERS, SRS, DRS or EGA.
        """
        if type(field) == int:
            return False
        # str.startswith accepts a tuple of prefixes -- one call instead
        # of a chain of or-ed tests.
        return field.startswith(('ER', 'SR', 'DR', 'EGA'))

    @classmethod
    @wrappers.check_args_not_none
    def _is_internal_id(cls, field):
        """ All internal ids are int. You can't really tell if one identifier
        is an internal id just by the fact that it's type is int, but you
        can tell if it isn't, if it contains characters other than digits.
        """
        if type(field) == int:
            return True
        return field.isdigit()

    @classmethod
    @wrappers.check_args_not_none
    def _is_name(cls, field):
        """ You can't tell for sure if one identifier is a name or not either.
        Basically if it contains numbers and alphabet characters, it may be a name.

        NOTE: the empty string also matches this pattern.
        """
        if not type(field) == str:
            return False
        return bool(re.search(r'^[0-9a-zA-Z]*$', field))

    @classmethod
    @wrappers.check_args_not_none
    def guess_identifier_type(cls, identifier):
        """
        This method receives the value of an identifier and returns its inferred
        type, where the identifier type options are: internal_id, name and
        accession_number.
        """
        if cls._is_accession_nr(identifier):
            identifier_type = 'accession_number'
        elif cls._is_internal_id(identifier):
            identifier_type = 'internal_id'
        else:
            identifier_type = 'name'
        return identifier_type
| agpl-3.0 |
cjcopi/healpy | healpy/__init__.py | 2 | 2998 | #
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
"""HealPy is a package to manipulate Healpix maps (ang2pix, pix2ang) and
compute spherical harmonics tranforms on them.
"""
import warnings
from .version import __version__
from .pixelfunc import (
ma,
mask_good,
mask_bad,
ang2pix,
pix2ang,
xyf2pix,
pix2xyf,
pix2vec,
vec2pix,
vec2ang,
ang2vec,
nside2npix,
npix2nside,
nside2order,
order2nside,
isnsideok,
isnpixok,
ring2nest,
nest2ring,
reorder,
get_all_neighbours,
max_pixrad,
get_interp_val,
get_interp_weights,
fit_dipole,
fit_monopole,
remove_dipole,
remove_monopole,
get_nside,
maptype,
ud_grade,
nside2resol,
nside2pixarea,
get_map_size,
)
from .sphtfunc import (
anafast,
map2alm,
alm2map,
Alm,
synalm,
synfast,
smoothing,
smoothalm,
almxfl,
alm2cl,
pixwin,
alm2map_der1,
gauss_beam,
bl2beam,
beam2bl,
check_max_nside,
)
from ._query_disc import query_disc, query_strip, query_polygon, boundaries
from ._pixelfunc import ringinfo, pix2ring
from ._sphtools import rotate_alm
from ._sphtools import alm2map_spin_healpy as alm2map_spin
from ._sphtools import map2alm_spin_healpy as map2alm_spin
from .rotator import Rotator, vec2dir, dir2vec
from ._healpy_pixel_lib import UNSEEN
from .visufunc import (
mollview,
graticule,
delgraticules,
gnomview,
projplot,
projscatter,
projtext,
cartview,
orthview,
azeqview,
)
from .zoomtool import mollzoom, set_g_clim
from .fitsfunc import write_map, read_map, read_alm, write_alm, write_cl, read_cl
from ._masktools import dist2holes_healpy as dist2holes
from ._hotspots import hotspots_healpy as hotspots
from ._line_integral_convolution import line_integral_convolution
def disable_warnings():
    """Silence every warning emitted by the healpy package.

    Only affects the current session; individual functions can also be
    quieted with ``verbose=False``, and ``hp.enable_warnings()`` turns
    warnings back on.
    """
    warnings.filterwarnings(module="healpy", action="ignore")
def enable_warnings():
warnings.simplefilter("always") | gpl-2.0 |
Sentient07/scikit-learn | examples/decomposition/plot_image_denoising.py | 70 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image using firstly online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer from the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)

from time import time

import matplotlib.pyplot as plt
import numpy as np
import scipy as sp

from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version

if sp_version < (0, 12):
    raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                   "thus does not include the scipy.misc.face() image.")

###############################################################################
try:
    from scipy import misc
    face = misc.face(gray=True)
except AttributeError:
    # Old versions of scipy have face in the top level package
    face = sp.face(gray=True)

# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255

# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape

# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)

# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
# Standardize patches (zero mean, unit variance) before dictionary learning.
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))

###############################################################################
# Learn the dictionary from reference patches

print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)

# Show the 100 learned dictionary atoms on a 10x10 grid.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
             'Train time %.1fs on %d patches' % (dt, len(data)),
             fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
def show_with_diff(image, reference, title):
    """Plot *image* side by side with its difference from *reference*."""
    plt.figure(figsize=(5, 3.3))
    plt.subplot(1, 2, 1)
    plt.title('Image')
    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    plt.subplot(1, 2, 2)
    residual = image - reference
    residual_norm = np.sqrt(np.sum(residual ** 2))
    plt.title('Difference (norm: %.2f)' % residual_norm)
    plt.imshow(residual, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    plt.suptitle(title, size=16)
    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')

###############################################################################
# Extract noisy patches and reconstruct them using the dictionary

print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))

# Each entry: (plot title, sparse-coding algorithm, extra transform params).
transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = face.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    # Sparse-code the noisy patches against the learned dictionary V,
    # then map the codes back to pixel space.
    code = dico.transform(data)
    patches = np.dot(code, V)
    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        # Thresholding output is not intensity-calibrated; rescale to [0, 1].
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
        patches, (height, width // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], face,
                   title + ' (time: %.1fs)' % dt)

plt.show()
| bsd-3-clause |
pilou-/ansible | lib/ansible/modules/network/avi/avi_backupconfiguration.py | 31 | 4835 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_backupconfiguration
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of BackupConfiguration Avi RESTful Object
description:
- This module is used to configure BackupConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
backup_file_prefix:
description:
- Prefix of the exported configuration file.
- Field introduced in 17.1.1.
backup_passphrase:
description:
- Passphrase of backup configuration.
maximum_backups_stored:
description:
- Rotate the backup files based on this count.
- Allowed values are 1-20.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
name:
description:
- Name of backup configuration.
required: true
remote_directory:
description:
- Directory at remote destination with write permission for ssh user.
remote_hostname:
description:
- Remote destination.
save_local:
description:
- Local backup.
type: bool
ssh_user_ref:
description:
- Access credentials for remote destination.
- It is a reference to an object of type cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
upload_to_remote_host:
description:
- Remote backup.
type: bool
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create BackupConfiguration object
avi_backupconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_backupconfiguration
"""
RETURN = '''
obj:
description: BackupConfiguration (api/backupconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point for the avi_backupconfiguration module.

    Builds the module argument spec (module-specific options merged with
    the common Avi connection options), verifies the Avi SDK is present,
    and hands off create/update/delete handling to avi_ansible_api.
    """
    # Module-specific options; the shared controller/credential options are
    # merged in below via avi_common_argument_spec().
    spec = {
        'state': dict(default='present', choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put', choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'backup_file_prefix': dict(type='str'),
        'backup_passphrase': dict(type='str', no_log=True),
        'maximum_backups_stored': dict(type='int'),
        'name': dict(type='str', required=True),
        'remote_directory': dict(type='str'),
        'remote_hostname': dict(type='str'),
        'save_local': dict(type='bool'),
        'ssh_user_ref': dict(type='str'),
        'tenant_ref': dict(type='str'),
        'upload_to_remote_host': dict(type='bool'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    spec.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    if not HAS_AVI:
        # The cvprac-style import above failed; abort with guidance.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # 'backup_passphrase' is write-only on the controller, so it is excluded
    # from the change-comparison performed by avi_ansible_api.
    return avi_ansible_api(module, 'backupconfiguration',
                           set(['backup_passphrase']))
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
clovett/MissionPlanner | Lib/distutils/command/register.py | 50 | 11884 | """distutils.command.register
Implements the Distutils 'register' command (register with the repository).
"""
# created 2002/10/21, Richard Jones
__revision__ = "$Id$"
import urllib2
import getpass
import urlparse
import StringIO
from warnings import warn
from distutils.core import PyPIRCCommand
from distutils import log
class register(PyPIRCCommand):
    """Distutils 'register' command: register distribution metadata with a
    package index (PyPI by default), authenticating via .pypirc or an
    interactive prompt.  Python 2 only (urllib2, raw_input, print stmt).
    """
    description = ("register the distribution with the Python package index")
    user_options = PyPIRCCommand.user_options + [
        ('list-classifiers', None,
         'list the valid Trove classifiers'),
        ('strict', None ,
         'Will stop the registering if the meta-data are not fully compliant')
        ]
    boolean_options = PyPIRCCommand.boolean_options + [
        'verify', 'list-classifiers', 'strict']
    # Always run the 'check' sub-command first to validate the metadata.
    sub_commands = [('check', lambda self: True)]
    def initialize_options(self):
        PyPIRCCommand.initialize_options(self)
        self.list_classifiers = 0
        self.strict = 0
    def finalize_options(self):
        PyPIRCCommand.finalize_options(self)
        # setting options for the `check` subcommand
        check_options = {'strict': ('register', self.strict),
                         'restructuredtext': ('register', 1)}
        self.distribution.command_options['check'] = check_options
    def run(self):
        self.finalize_options()
        self._set_config()
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)
        # Dispatch on the requested mode: dry-run verification, classifier
        # listing, or the real registration POST.
        if self.dry_run:
            self.verify_metadata()
        elif self.list_classifiers:
            self.classifiers()
        else:
            self.send_metadata()
    def check_metadata(self):
        """Deprecated API.  Use the 'check' command instead."""
        warn("distutils.command.register.check_metadata is deprecated, \
              use the check command instead", PendingDeprecationWarning)
        check = self.distribution.get_command_obj('check')
        check.ensure_finalized()
        check.strict = self.strict
        check.restructuredtext = 1
        check.run()
    def _set_config(self):
        ''' Reads the configuration file and set attributes.
        '''
        config = self._read_pypirc()
        if config != {}:
            self.username = config['username']
            self.password = config['password']
            self.repository = config['repository']
            self.realm = config['realm']
            self.has_config = True
        else:
            # No .pypirc entry: only the default repository aliases are valid.
            if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
                raise ValueError('%s not found in .pypirc' % self.repository)
            if self.repository == 'pypi':
                self.repository = self.DEFAULT_REPOSITORY
            self.has_config = False
    def classifiers(self):
        ''' Fetch the list of classifiers from the server.
        '''
        response = urllib2.urlopen(self.repository+'?:action=list_classifiers')
        log.info(response.read())
    def verify_metadata(self):
        ''' Send the metadata to the package index server to be checked.
        '''
        # send the info to the server and report the result
        (code, result) = self.post_to_server(self.build_post_data('verify'))
        log.info('Server response (%s): %s' % (code, result))
    def send_metadata(self):
        ''' Send the metadata to the package index server.

            Well, do the following:
            1. figure who the user is, and then
            2. send the data as a Basic auth'ed POST.

            First we try to read the username/password from $HOME/.pypirc,
            which is a ConfigParser-formatted file with a section
            [distutils] containing username and password entries (both
            in clear text). Eg:

                [distutils]
                index-servers =
                    pypi

                [pypi]
                username: fred
                password: sekrit

            Otherwise, to figure who the user is, we offer the user three
            choices:

             1. use existing login,
             2. register as a new user, or
             3. set the password to a random string and email the user.
        '''
        # see if we can short-cut and get the username/password from the
        # config
        if self.has_config:
            choice = '1'
            username = self.username
            password = self.password
        else:
            choice = 'x'
            username = password = ''
        # get the user's login info
        choices = '1 2 3 4'.split()
        while choice not in choices:
            self.announce('''\
We need to know who you are, so please choose either:
 1. use your existing login,
 2. register as a new user,
 3. have the server generate a new password for you (and email it to you), or
 4. quit
Your selection [default 1]: ''', log.INFO)
            choice = raw_input()
            if not choice:
                choice = '1'
            elif choice not in choices:
                print 'Please choose one of the four options!'
        if choice == '1':
            # get the username and password
            while not username:
                username = raw_input('Username: ')
            while not password:
                password = getpass.getpass('Password: ')
            # set up the authentication
            auth = urllib2.HTTPPasswordMgr()
            host = urlparse.urlparse(self.repository)[1]
            auth.add_password(self.realm, host, username, password)
            # send the info to the server and report the result
            code, result = self.post_to_server(self.build_post_data('submit'),
                auth)
            self.announce('Server response (%s): %s' % (code, result),
                          log.INFO)
            # possibly save the login
            if code == 200:
                if self.has_config:
                    # sharing the password in the distribution instance
                    # so the upload command can reuse it
                    self.distribution.password = password
                else:
                    self.announce(('I can store your PyPI login so future '
                                   'submissions will be faster.'), log.INFO)
                    self.announce('(the login will be stored in %s)' % \
                                  self._get_rc_file(), log.INFO)
                    choice = 'X'
                    while choice.lower() not in 'yn':
                        choice = raw_input('Save your login (y/N)?')
                        if not choice:
                            choice = 'n'
                    if choice.lower() == 'y':
                        self._store_pypirc(username, password)
        elif choice == '2':
            # Register a new user account on the index.
            data = {':action': 'user'}
            data['name'] = data['password'] = data['email'] = ''
            data['confirm'] = None
            while not data['name']:
                data['name'] = raw_input('Username: ')
            while data['password'] != data['confirm']:
                while not data['password']:
                    data['password'] = getpass.getpass('Password: ')
                while not data['confirm']:
                    data['confirm'] = getpass.getpass(' Confirm: ')
                if data['password'] != data['confirm']:
                    data['password'] = ''
                    data['confirm'] = None
                    print "Password and confirm don't match!"
            while not data['email']:
                data['email'] = raw_input('   EMail: ')
            code, result = self.post_to_server(data)
            if code != 200:
                log.info('Server response (%s): %s' % (code, result))
            else:
                log.info('You will receive an email shortly.')
                log.info(('Follow the instructions in it to '
                          'complete registration.'))
        elif choice == '3':
            # Ask the server to e-mail a password reset.
            data = {':action': 'password_reset'}
            data['email'] = ''
            while not data['email']:
                data['email'] = raw_input('Your email address: ')
            code, result = self.post_to_server(data)
            log.info('Server response (%s): %s' % (code, result))
    def build_post_data(self, action):
        # figure the data to send - the metadata plus some additional
        # information used by the package server
        meta = self.distribution.metadata
        data = {
            ':action': action,
            'metadata_version' : '1.0',
            'name': meta.get_name(),
            'version': meta.get_version(),
            'summary': meta.get_description(),
            'home_page': meta.get_url(),
            'author': meta.get_contact(),
            'author_email': meta.get_contact_email(),
            'license': meta.get_licence(),
            'description': meta.get_long_description(),
            'keywords': meta.get_keywords(),
            'platform': meta.get_platforms(),
            'classifiers': meta.get_classifiers(),
            'download_url': meta.get_download_url(),
            # PEP 314
            'provides': meta.get_provides(),
            'requires': meta.get_requires(),
            'obsoletes': meta.get_obsoletes(),
        }
        # PEP 314 fields bump the declared metadata version.
        if data['provides'] or data['requires'] or data['obsoletes']:
            data['metadata_version'] = '1.1'
        return data
    def post_to_server(self, data, auth=None):
        ''' Post a query to the server, and return a string response.
        '''
        if 'name' in data:
            self.announce('Registering %s to %s' % (data['name'],
                                                    self.repository),
                                                    log.INFO)
        # Build up the MIME payload for the urllib2 POST data
        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = '\n--' + boundary
        end_boundary = sep_boundary + '--'
        body = StringIO.StringIO()
        for key, value in data.items():
            # handle multiple entries for the same name
            if type(value) not in (type([]), type( () )):
                value = [value]
            for value in value:
                body.write(sep_boundary)
                body.write('\nContent-Disposition: form-data; name="%s"'%key)
                body.write("\n\n")
                # NOTE(review): value is written as-is; presumably always a
                # str here (non-str metadata would raise) -- confirm.
                body.write(value)
                if value and value[-1] == '\r':
                    body.write('\n')  # write an extra newline (lurve Macs)
        body.write(end_boundary)
        body.write("\n")
        body = body.getvalue()
        # build the Request
        headers = {
            'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
            'Content-length': str(len(body))
        }
        req = urllib2.Request(self.repository, body, headers)
        # handle HTTP and include the Basic Auth handler
        opener = urllib2.build_opener(
            urllib2.HTTPBasicAuthHandler(password_mgr=auth)
            )
        data = ''
        try:
            result = opener.open(req)
        except urllib2.HTTPError, e:
            if self.show_response:
                data = e.fp.read()
            result = e.code, e.msg
        except urllib2.URLError, e:
            result = 500, str(e)
        else:
            if self.show_response:
                data = result.read()
            result = 200, 'OK'
        if self.show_response:
            dashes = '-' * 75
            self.announce('%s%s%s' % (dashes, data, dashes))
        return result
| gpl-3.0 |
nagyistoce/edx-platform | common/lib/xmodule/setup.py | 72 | 3491 | from setuptools import setup, find_packages
# Entry-point strings mapping XModule tag names to their descriptor classes.
# Registered below under both the 'xblock.v1' and 'xmodule.v1' groups.
XMODULES = [
    "abtest = xmodule.abtest_module:ABTestDescriptor",
    "book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "chapter = xmodule.seq_module:SequenceDescriptor",
    "combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
    "conditional = xmodule.conditional_module:ConditionalDescriptor",
    "course = xmodule.course_module:CourseDescriptor",
    "customtag = xmodule.template_module:CustomTagDescriptor",
    "discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "html = xmodule.html_module:HtmlDescriptor",
    "image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "library_content = xmodule.library_content_module:LibraryContentDescriptor",
    "error = xmodule.error_module:ErrorDescriptor",
    "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
    "poll_question = xmodule.poll_module:PollDescriptor",
    "problem = xmodule.capa_module:CapaDescriptor",
    "problemset = xmodule.seq_module:SequenceDescriptor",
    "randomize = xmodule.randomize_module:RandomizeDescriptor",
    "split_test = xmodule.split_test_module:SplitTestDescriptor",
    "section = xmodule.backcompat_module:SemanticSectionDescriptor",
    "sequential = xmodule.seq_module:SequenceDescriptor",
    "slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "video = xmodule.video_module:VideoDescriptor",
    "videoalpha = xmodule.video_module:VideoDescriptor",
    "videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "videosequence = xmodule.seq_module:SequenceDescriptor",
    "discussion = xmodule.discussion_module:DiscussionDescriptor",
    "course_info = xmodule.html_module:CourseInfoDescriptor",
    "static_tab = xmodule.html_module:StaticTabDescriptor",
    "custom_tag_template = xmodule.raw_module:RawDescriptor",
    "about = xmodule.html_module:AboutDescriptor",
    "graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor",
    "annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
    "textannotation = xmodule.textannotation_module:TextAnnotationDescriptor",
    "videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor",
    "imageannotation = xmodule.imageannotation_module:ImageAnnotationDescriptor",
    "foldit = xmodule.foldit_module:FolditDescriptor",
    "word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
    "hidden = xmodule.hidden_module:HiddenDescriptor",
    "raw = xmodule.raw_module:RawDescriptor",
    "crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor",
    "lti = xmodule.lti_module:LTIDescriptor",
]
# Native XBlocks, registered only under the 'xblock.v1' group.
XBLOCKS = [
    "library = xmodule.library_root_xblock:LibraryRoot",
    "vertical = xmodule.vertical_block:VerticalBlock",
    "wrapper = xmodule.wrapper_module:WrapperBlock",
]
setup(
    name="XModule",
    version="0.1",
    packages=find_packages(exclude=["tests"]),
    install_requires=[
        'setuptools',
        'docopt',
        'capa',
        'path.py',
        'webob',
        'opaque-keys',
    ],
    package_data={
        'xmodule': ['js/module/*'],
    },
    # See http://guide.python-distribute.org/creation.html#entry-points
    # for a description of entry_points
    entry_points={
        'xblock.v1': XMODULES + XBLOCKS,
        'xmodule.v1': XMODULES,
        'console_scripts': [
            'xmodule_assets = xmodule.static_content:main',
        ],
    },
)
| agpl-3.0 |
Carmezim/tensorflow | tensorflow/python/kernel_tests/fractional_max_pool_op_test.py | 107 | 24134 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fractional max pool operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class FractionalMaxPoolTest(test.TestCase):
  """Forward-pass tests: compare fractional_max_pool against a NumPy
  reference implementation built from the op's returned pooling sequences.
  """
  # Random number generate with seed.
  # Fixed seeds keep these tests deterministic across runs; _PRNG state is
  # shared by all test methods in this class.
  _PRNG = np.random.RandomState(341261)
  _SEED = 123456
  _SEED2 = 654321

  def _MaxPoolAlongRows(self, input_matrix, row_seq, overlapping):
    """Perform max pool along row of a 2-D matrix based on row_seq.

    Args:
      input_matrix: A 2-D matrix.
      row_seq: Cumulative pooling sequence along row.
      overlapping: Whether or not use overlapping when pooling.

    Returns:
      A 2-D matrix, with
        * num_rows = len(row_seq)-1
        * num_cols = input_matrix.num_cols.
    """
    output_image = np.zeros(input_matrix.shape[1])
    row_max = row_seq[-1]
    for i in range(row_seq.shape[0] - 1):
      row_start = row_seq[i]
      # Overlapping pooling extends each region by one row (clamped below).
      row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
      row_end = min(row_end, row_max)
      output_image = np.vstack((output_image, np.amax(
          input_matrix[row_start:row_end, :], axis=0)))  # axis 0 is along row
    # remove the sentinel row
    return output_image[1:, :]

  def _MaxPoolAlongCols(self, input_matrix, col_seq, overlapping):
    """Perform max pool along column of a 2-D matrix based on col_seq.

    Args:
      input_matrix: A 2-D matrix.
      col_seq: Cumulative pooling sequence along column.
      overlapping: Whether or not use overlapping when pooling.

    Returns:
      A 2-D matrix, with
        * num_rows = input_matrix.num_rows
        * num_cols = len(col_seq)-1.
    """
    # Reuse the row-pooling helper on the transposed matrix.
    input_matrix = input_matrix.transpose()
    output_matrix = self._MaxPoolAlongRows(input_matrix, col_seq, overlapping)
    return output_matrix.transpose()

  def _GetExpectedFractionalMaxPoolResult(self, input_tensor, row_seq, col_seq,
                                          overlapping):
    """Get expected fractional max pool result.

    row_seq and col_seq together defines the fractional pooling region.

    Args:
      input_tensor: Original input tensor, assuming it is a 4-D tensor, with
        dimension as [batch, height/row, width/column, channels/depth].
      row_seq: Cumulative pooling sequence along row.
      col_seq: Cumulative pooling sequence along column.
      overlapping: Use overlapping when doing pooling.

    Returns:
      A 4-D tensor that is the result of max pooling on input_tensor based on
        pooling region defined by row_seq and col_seq, conditioned on whether
        or not overlapping is used.
    """
    input_shape = input_tensor.shape
    output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1,
                    input_shape[3])
    output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
    # Pool each (batch, channel) 2-D slice independently: rows first, then
    # columns.
    for batch in range(input_shape[0]):
      for channel in range(input_shape[3]):
        two_dim_slice = input_tensor[batch, :, :, channel]
        tmp = self._MaxPoolAlongRows(two_dim_slice, row_seq, overlapping)
        output_tensor[batch, :, :, channel] = self._MaxPoolAlongCols(
            tmp, col_seq, overlapping)
    return output_tensor

  def _ValidateFractionalMaxPoolResult(self, input_tensor, pooling_ratio,
                                       pseudo_random, overlapping):
    """Validate FractionalMaxPool's result against expected.

    Expected result is computed given input_tensor, and pooling region defined
    by row_seq and col_seq.

    Args:
      input_tensor: A tensor or numpy ndarray.
      pooling_ratio: A list or tuple of length 4, first and last element be 1.
      pseudo_random: Use pseudo random method to generate pooling sequence.
      overlapping: Use overlapping when pooling.

    Returns:
      None
    """
    with self.test_session() as sess:
      p, r, c = nn_ops.fractional_max_pool(
          input_tensor,
          pooling_ratio,
          pseudo_random,
          overlapping,
          deterministic=True,
          seed=self._SEED,
          seed2=self._SEED2)
      # The op also returns the row/col pooling sequences it chose; feed
      # those into the NumPy reference to compute the expected output.
      actual, row_seq, col_seq = sess.run([p, r, c])
      expected = self._GetExpectedFractionalMaxPoolResult(input_tensor, row_seq,
                                                          col_seq, overlapping)
      self.assertShapeEqual(expected, p)
      self.assertAllClose(expected, actual)

  def _testVisually(self):
    """Manual test by printing out intermediate result of a small random
    tensor.

    Since _GetExpectedFractionalMaxPoolResult is 'automated', it feel safer
    to have a test case that you can see what's happening.
    This test will generate a small, random, int 2D matrix, and feed it to
    FractinalMaxPool and _GetExpectedFractionalMaxPoolResult.
    """
    # Leading underscore: not collected by the test runner; run manually.
    num_rows = 6
    num_cols = 6
    tensor_shape = (1, num_rows, num_cols, 1)
    pseudo_random = False
    for overlapping in True, False:
      print("-" * 70)
      print("Testing FractionalMaxPool with overlapping = {}".format(
          overlapping))
      rand_mat = self._PRNG.randint(10, size=tensor_shape)
      pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
      with self.test_session() as sess:
        p, r, c = nn_ops.fractional_max_pool(
            rand_mat,
            pooling_ratio,
            pseudo_random,
            overlapping,
            deterministic=True,
            seed=self._SEED,
            seed2=self._SEED2)
        tensor_output, row_seq, col_seq = sess.run([p, r, c])
        expected_result = self._GetExpectedFractionalMaxPoolResult(rand_mat,
                                                                   row_seq,
                                                                   col_seq,
                                                                   overlapping)
        print("row sequence:")
        print(row_seq)
        print("column sequence:")
        print(col_seq)
        print("Input:")
        # Print input with pooling region marked.
        for i in range(num_rows):
          row_to_print = []
          for j in range(num_cols):
            if j in col_seq:
              row_to_print.append("|")
            row_to_print.append(str(rand_mat[0, i, j, 0]))
          row_to_print.append("|")
          if i in row_seq:
            print("-" * 2 * len(row_to_print))
          print(" ".join(row_to_print))
        print("-" * 2 * len(row_to_print))
        print("Output from FractionalMaxPool:")
        print(tensor_output[0, :, :, 0])
        print("Expected result:")
        print(expected_result[0, :, :, 0])

  def testAllInputOptions(self):
    """Try all possible input options for fractional_max_pool.
    """
    num_batches = 5
    num_channels = 3
    num_rows = 20
    num_cols = 30
    for pseudo_random in True, False:
      for overlapping in True, False:
        tensor_shape = (num_batches, num_rows, num_cols, num_channels)
        # random tensor with value in [-500.0, 500.0)
        rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
        self._ValidateFractionalMaxPoolResult(
            rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
            overlapping)

  def testIntegerTensorInput(self):
    """Test it works fine when input tensor is integer type.
    """
    num_batches = 5
    num_channels = 3
    num_rows = 20
    num_cols = 30
    pseudo_random = True
    overlapping = True
    tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    rand_mat = self._PRNG.randint(1000, size=tensor_shape)
    self._ValidateFractionalMaxPoolResult(rand_mat,
                                          [1, math.sqrt(3), math.sqrt(2), 1],
                                          pseudo_random, overlapping)

  def testDifferentTensorShapes(self):
    """Test different shapes of input tensor.

    Mainly test different combinations of num_rows and num_cols.
    """
    pseudo_random = True
    overlapping = True
    for num_batches in [1, 3]:
      for num_channels in [1, 3]:
        for num_rows in [10, 20, 50]:
          for num_cols in [10, 20, 50]:
            tensor_shape = (num_batches, num_rows, num_cols, num_channels)
            # random tensor with value in [-500.0, 500.0)
            rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
            self._ValidateFractionalMaxPoolResult(
                rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
                overlapping)

  def testLargePoolingRatio(self):
    """Test when pooling ratio is not within [1, 2).
    """
    pseudo_random = True
    overlapping = True
    num_batches = 3
    num_channels = 3
    num_rows = 30
    num_cols = 50
    tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    for row_ratio in [math.sqrt(11), math.sqrt(37)]:
      for col_ratio in [math.sqrt(11), math.sqrt(27)]:
        # random tensor with value in [-500.0, 500.0)
        rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
        self._ValidateFractionalMaxPoolResult(rand_mat,
                                              [1, row_ratio, col_ratio, 1],
                                              pseudo_random, overlapping)

  def testDivisiblePoolingRatio(self):
    """Test when num of rows/cols can evenly divide pooling ratio.

    This is a case regular max pooling can handle. Should be handled by
    fractional pooling as well.
    """
    pseudo_random = True
    overlapping = True
    num_batches = 3
    num_channels = 3
    num_rows = 30
    num_cols = 50
    tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    # random tensor with value in [-500.0, 500.0)
    rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
    self._ValidateFractionalMaxPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,
                                          overlapping)
class FractionalMaxPoolGradTest(test.TestCase):
  """Tests for FractionalMaxPoolGrad.

  Two types of tests for FractionalMaxPoolGrad.
  1) Test fractional_max_pool_grad() directly.
     This type of test relies on gen_nn_ops._max_pool_grad() returns the
     correct result. For example:
       * input_tensor_shape = (1, 10, 10, 1)
       * window_size = (1, 2, 2, 1)
       * stride_size = (1, 2, 2, 1)
       * padding: not really import, since 10/2 is divisible
     max pooling should generate the same result as fractional max pooling
     with:
       * row_sequence = [0, 2, 4, 6, 8, 10]
       * col_sequence = [0, 2, 4, 6, 8, 10]
       * overlapping = False
     This also means their gradients in such case will be the same.

     Similarly, when
       * input_tensor_shape = (1, 7, 7, 1)
       * window_size = (1, 3, 3, 1)
       * stride_size = (1, 2, 2, 1)
       * padding: not important
     max pooling should generate the same result as fractional max pooling
     with:
       * row_sequence = [0, 2, 4, 7]
       * col_sequence = [0, 2, 4, 7]
       * overlapping = True

  2) Test through compute_gradient_error()
  """
  # Fixed seeds keep these tests deterministic across runs.
  _PRNG = np.random.RandomState(341261)
  _SEED = 123456
  _SEED2 = 654321

  def _GenerateUniqueRandomInputTensor(self, shape):
    """Generate 'unqiue' random input tensor.

    'Unique' means there's no collision values in the tensor, all elements
    are different. This is done by generating sequence of integers with step
    of 1 and then randomly shuffle these integers.

    Args:
      shape: Shape of the tensor desired.

    Returns:
      A numpy ndarray with size = shape and dtype = numpy.float32.
    """
    # Uniqueness matters: max-pool gradients are ambiguous when a pooling
    # region contains a repeated maximum.
    num_elements = 1
    for size in shape:
      num_elements *= size
    x = np.arange(num_elements, dtype=np.float32)
    self._PRNG.shuffle(x)
    return x.reshape(shape)

  def testDirectNotUseOverlapping(self):
    # Compare fractional_max_pool_grad against regular max_pool_grad on a
    # regular (evenly-divisible) pooling grid; results must match exactly.
    for num_batches in [1, 3]:
      for row_window_size in [2, 5]:
        for col_window_size in [2, 4]:
          num_rows = row_window_size * 5
          num_cols = col_window_size * 7
          for num_channels in [1, 2]:
            input_shape = (num_batches, num_rows, num_cols, num_channels)
            with self.test_session() as _:
              input_tensor = constant_op.constant(
                  self._GenerateUniqueRandomInputTensor(input_shape))
              window_size = [1, row_window_size, col_window_size, 1]
              stride_size = [1, row_window_size, col_window_size, 1]
              padding = "VALID"
              output_tensor = nn_ops.max_pool(input_tensor, window_size,
                                              stride_size, padding)
              output_data = output_tensor.eval()
              output_backprop = self._PRNG.randint(100, size=output_data.shape)
              input_backprop_tensor = gen_nn_ops._max_pool_grad(input_tensor,
                                                                output_tensor,
                                                                output_backprop,
                                                                window_size,
                                                                stride_size,
                                                                padding)
              input_backprop = input_backprop_tensor.eval()
              # Equivalent non-overlapping pooling sequences for the same
              # regular grid.
              row_seq = list(range(0, num_rows + 1, row_window_size))
              col_seq = list(range(0, num_cols + 1, col_window_size))
              fmp_input_backprop_tensor = gen_nn_ops._fractional_max_pool_grad(
                  input_tensor,
                  output_tensor,
                  output_backprop,
                  row_seq,
                  col_seq,
                  overlapping=False)
              fmp_input_backprop = fmp_input_backprop_tensor.eval()
              self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
              self.assertAllClose(input_backprop, fmp_input_backprop)

  def testDirectUseOverlapping(self):
    # Same idea as above, but with stride = window - 1 so pooling regions
    # overlap by one row/column.
    for num_batches in [1, 3]:
      for row_window_size in [2, 5]:
        for col_window_size in [2, 4]:
          num_rows = (row_window_size - 1) * 5 + 1
          num_cols = (col_window_size - 1) * 7 + 1
          for num_channels in [1, 2]:
            input_shape = (num_batches, num_rows, num_cols, num_channels)
            with self.test_session() as _:
              input_tensor = constant_op.constant(
                  self._GenerateUniqueRandomInputTensor(input_shape))
              window_size = [1, row_window_size, col_window_size, 1]
              stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
              padding = "VALID"
              output_tensor = nn_ops.max_pool(input_tensor, window_size,
                                              stride_size, padding)
              output_data = output_tensor.eval()
              output_backprop = self._PRNG.randint(100, size=output_data.shape)
              input_backprop_tensor = gen_nn_ops._max_pool_grad(input_tensor,
                                                                output_tensor,
                                                                output_backprop,
                                                                window_size,
                                                                stride_size,
                                                                padding)
              input_backprop = input_backprop_tensor.eval()
              row_seq = list(range(0, num_rows, row_window_size - 1))
              col_seq = list(range(0, num_cols, col_window_size - 1))
              # Extend the last region to cover the final row/column.
              row_seq[-1] += 1
              col_seq[-1] += 1
              fmp_input_backprop_tensor = gen_nn_ops._fractional_max_pool_grad(
                  input_tensor,
                  output_tensor,
                  output_backprop,
                  row_seq,
                  col_seq,
                  overlapping=True)
              fmp_input_backprop = fmp_input_backprop_tensor.eval()
              self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
              self.assertAllClose(input_backprop, fmp_input_backprop)

  def testAllInputOptionsThroughGradientError(self):
    input_shape = (1, 7, 13, 1)
    input_data = self._GenerateUniqueRandomInputTensor(input_shape)
    # Add some randomness to make input_data not so 'integer'
    input_data += self._PRNG.random_sample(input_shape)
    pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
    for pseudo_random in True, False:
      for overlapping in True, False:
        with self.test_session() as _:
          input_tensor = constant_op.constant(input_data, shape=input_shape)
          output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool(
              input_tensor,
              pooling_ratio,
              pseudo_random=pseudo_random,
              overlapping=overlapping,
              deterministic=True,
              seed=self._SEED,
              seed2=self._SEED2)
          output_data = output_tensor.eval()
          output_shape = output_data.shape
          # error_margin and delta setting is similar to max_pool_grad.
          error_margin = 1e-3
          gradient_error = gradient_checker.compute_gradient_error(
              input_tensor,
              input_shape,
              output_tensor,
              output_shape,
              x_init_value=input_data.reshape(input_shape),
              delta=1e-2)
          self.assertLess(gradient_error, error_margin)

  def testDifferentTensorShapesThroughGradientError(self):
    pseudo_random = True
    overlapping = True
    pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
    for num_batches in [1, 2]:
      for num_rows in [5, 13]:
        for num_cols in [5, 11]:
          for num_channels in [1, 3]:
            input_shape = (num_batches, num_rows, num_cols, num_channels)
            input_data = self._GenerateUniqueRandomInputTensor(input_shape)
            # Add some randomness to make input_data not so 'integer'
            input_data += self._PRNG.random_sample(input_shape)
            with self.test_session() as _:
              input_tensor = constant_op.constant(input_data, shape=input_shape)
              output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool(
                  input_tensor,
                  pooling_ratio,
                  pseudo_random=pseudo_random,
                  overlapping=overlapping,
                  deterministic=True,
                  seed=self._SEED,
                  seed2=self._SEED2)
              output_data = output_tensor.eval()
              output_shape = output_data.shape
              # error_margin and delta setting is similar to max_pool_grad.
              error_margin = 1e-3
              gradient_error = gradient_checker.compute_gradient_error(
                  input_tensor,
                  input_shape,
                  output_tensor,
                  output_shape,
                  x_init_value=input_data.reshape(input_shape),
                  delta=1e-2)
              self.assertLess(gradient_error, error_margin)

  def testLargePoolingRatioThroughGradientError(self):
    input_shape = (1, 17, 23, 1)
    input_data = self._GenerateUniqueRandomInputTensor(input_shape)
    # Add some randomness to make input_data not so 'integer'
    input_data += self._PRNG.random_sample(input_shape)
    pooling_ratio = (1, math.sqrt(13), math.sqrt(7), 1)
    output_shape = [int(a / b) for a, b in zip(input_shape, pooling_ratio)]
    overlapping = True
    pseudo_random = False
    with self.test_session() as _:
      input_tensor = constant_op.constant(input_data, shape=input_shape)
      output_tensor, unused_a, unused_b = nn_ops.fractional_max_pool(
          input_tensor,
          pooling_ratio,
          pseudo_random=pseudo_random,
          overlapping=overlapping,
          deterministic=True,
          seed=self._SEED,
          seed2=self._SEED2)
      # error_margin and delta setting is similar to max_pool_grad.
      error_margin = 1e-3
      gradient_error = gradient_checker.compute_gradient_error(
          input_tensor,
          input_shape,
          output_tensor,
          output_shape,
          x_init_value=input_data.reshape(input_shape),
          delta=1e-2)
      self.assertLess(gradient_error, error_margin)

  def testWhenRepeatedMaxValueInPoolingRegion(self):
    """Test when there's repeating value in pooling region.

    There's no formal definition for what the gradient should be when there're
    multiple max value within a pooling cell. Such as
        | 1 5 |
        | 5 3 |
    The expected result depends heavily on implementation, if someone swap the
    order of a nested for loop when walking through the tensor, result would
    be very different.

    The goal of this test is to alert when someone else change the
    implementation. Current implementation scans row-by-row.
    """
    input_data = [5.0, 4.0, 6.0, 7.0,
                  3.0, 5.0, 9.0, 6.0,
                  8.0, 8.0, 9.0, 5.0,
                  7.0, 4.0, 0.0, 0.0]  # pyformat: disable
    input_size = [1, 4, 4, 1]
    output_backprop = [12.0, 15.0,
                       17.0, -5.0,
                       6.0, 21.0]  # pyformat: disable
    row_seq = [0, 1, 3, 4]
    col_seq = [0, 2, 4]
    output_data_not_overlapping = [5.0, 7.0,
                                   8.0, 9.0,
                                   7.0, 0.0]  # pyformat: disable
    output_data_overlapping = [9.0, 9.0,
                               9.0, 9.0,
                               7.0, 0.0]  # pyformat: disable
    output_size = [1, 3, 2, 1]
    # Hard-coded expectations pin the current row-by-row scan order of the
    # kernel; a change in scan order will (intentionally) fail this test.
    expected_input_backprop_not_overlapping = np.reshape(
        [12.0, 0.0, 0.0, 15.0,
         0.0, 0.0, -5.0, 0.0,
         17.0, 0.0, 0.0, 0.0,
         6.0, 0.0, 21.0, 0.0],
        input_size)  # pyformat: disable
    expected_input_backprop_overlapping = np.reshape(
        [0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 39.0, 0.0,
         0.0, 0.0, 0.0, 0.0,
         6.0, 0.0, 21.0, 0.0],
        input_size)  # pyformat: disable
    with self.test_session() as _:
      # Test when overlapping is False
      input_tensor = constant_op.constant(input_data, shape=input_size)
      output_tensor = constant_op.constant(
          output_data_not_overlapping, shape=output_size)
      grad = constant_op.constant(output_backprop, shape=output_size)
      r = gen_nn_ops._fractional_max_pool_grad(
          input_tensor,
          output_tensor,
          grad,
          row_seq,
          col_seq,
          overlapping=False)
      input_backprop_not_overlapping = r.eval()
      self.assertShapeEqual(
          np.reshape(expected_input_backprop_not_overlapping, input_size), r)
      self.assertAllClose(expected_input_backprop_not_overlapping,
                          input_backprop_not_overlapping)
      # Test when overlapping is True
      output_tensor = constant_op.constant(
          output_data_overlapping, shape=output_size)
      r = gen_nn_ops._fractional_max_pool_grad(
          input_tensor, output_tensor, grad, row_seq, col_seq, overlapping=True)
      input_backprop_overlapping = r.eval()
      self.assertShapeEqual(
          np.reshape(expected_input_backprop_overlapping, input_size), r)
      self.assertAllClose(expected_input_backprop_overlapping,
                          input_backprop_overlapping)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kustodian/ansible | test/units/modules/network/cloudvision/test_cv_server_provision.py | 52 | 46155 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat import unittest
from units.compat.mock import patch, Mock
import sys
sys.modules['cvprac'] = Mock()
sys.modules['cvprac.cvp_client'] = Mock()
sys.modules['cvprac.cvp_client_errors'] = Mock()
from ansible.modules.network.cloudvision import cv_server_provision
class MockException(Exception):
    """Generic stand-in patched over the cvprac exception classes."""
class TestCvServerProvision(unittest.TestCase):
    @patch('ansible.modules.network.cloudvision.cv_server_provision.CvpApiError',
           new_callable=lambda: MockException)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
    def test_main_module_args(self, mock_module, mock_connect, mock_info,
                              mock_comp, mock_server_conf, mock_exception):
        ''' Test main module args.

        Verifies the argument_spec handed to AnsibleModule, and that a
        CvpApiError raised while gathering switch info is reported via
        fail_json before any configlet work happens.
        '''
        mock_module_object = Mock()
        mock_module_object.params = dict(action='show', switch_name='eos')
        # fail_json normally terminates the module; emulate with SystemExit.
        mock_module_object.fail_json.side_effect = SystemExit('Exiting')
        mock_module.return_value = mock_module_object
        mock_connect.return_value = 'Client'
        # switch_info raises the (mocked) CvpApiError.
        mock_info.side_effect = mock_exception('Error Getting Info')
        # Mirror of the argument_spec built in cv_server_provision.main().
        # NOTE(review): 'require=True' (not 'required') appears to mirror the
        # module's own spec; fixing it only here would break the assert below
        # — confirm against the module before changing either side.
        argument_spec = dict(
            host=dict(required=True),
            port=dict(required=False, default=None),
            protocol=dict(default='https', choices=['http', 'https']),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            server_name=dict(required=True),
            switch_name=dict(required=True),
            switch_port=dict(required=True),
            port_vlan=dict(required=False, default=None),
            template=dict(require=True),
            action=dict(default='show', choices=['show', 'add', 'remove']),
            auto_run=dict(type='bool', default=False),
        )
        self.assertRaises(SystemExit, cv_server_provision.main)
        mock_module.assert_called_with(argument_spec=argument_spec,
                                      supports_check_mode=False)
        self.assertEqual(mock_connect.call_count, 1)
        self.assertEqual(mock_info.call_count, 1)
        # Failure happened before compliance/configlet steps were reached.
        mock_comp.assert_not_called()
        mock_server_conf.assert_not_called()
        mock_module_object.fail_json.assert_called_with(msg='Error Getting Info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.CvpApiError',
           new_callable=lambda: MockException)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
    def test_main_no_switch_configlet(self, mock_module, mock_connect,
                                      mock_info, mock_comp, mock_server_conf,
                                      mock_exception):
        ''' Test main fails if switch has no configlet for Ansible to edit.
        '''
        mock_module_object = Mock()
        mock_module_object.params = dict(action='add', switch_name='eos')
        # fail_json normally terminates the module; emulate with SystemExit.
        mock_module_object.fail_json.side_effect = SystemExit('Exiting')
        mock_module.return_value = mock_module_object
        mock_connect.return_value = 'Client'
        mock_info.return_value = 'Info'
        # No '<switch>-server' configlet exists for the device.
        mock_server_conf.return_value = None
        self.assertRaises(SystemExit, cv_server_provision.main)
        self.assertEqual(mock_connect.call_count, 1)
        self.assertEqual(mock_info.call_count, 1)
        self.assertEqual(mock_comp.call_count, 1)
        self.assertEqual(mock_server_conf.call_count, 1)
        mock_module_object.fail_json.assert_called_with(
            msg='Switch eos has no configurable server ports.')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.CvpApiError',
           new_callable=lambda: MockException)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
    def test_main_port_not_in_config(self, mock_module, mock_connect, mock_info,
                                     mock_comp, mock_server_conf,
                                     mock_port_conf, mock_exception):
        ''' Test main fails if user specified port not in configlet.
        '''
        mock_module_object = Mock()
        mock_module_object.params = dict(action='add', switch_name='eos',
                                         switch_port='3')
        # fail_json normally terminates the module; emulate with SystemExit.
        mock_module_object.fail_json.side_effect = SystemExit('Exiting')
        mock_module.return_value = mock_module_object
        mock_connect.return_value = 'Client'
        mock_info.return_value = 'Info'
        mock_server_conf.return_value = 'Configlet'
        # The requested port has no block in the server configlet.
        mock_port_conf.return_value = None
        self.assertRaises(SystemExit, cv_server_provision.main)
        self.assertEqual(mock_connect.call_count, 1)
        self.assertEqual(mock_info.call_count, 1)
        self.assertEqual(mock_comp.call_count, 1)
        self.assertEqual(mock_server_conf.call_count, 1)
        self.assertEqual(mock_port_conf.call_count, 1)
        mock_module_object.fail_json.assert_called_with(
            msg='Port 3 is not configurable as a server port on switch eos.')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
    def test_main_show(self, mock_module, mock_connect, mock_info, mock_comp,
                       mock_server_conf, mock_port_conf, mock_conf_action):
        ''' Test main good with show action.
        '''
        mock_module_object = Mock()
        mock_module_object.params = dict(action='show', switch_name='eos',
                                         switch_port='3', auto_run=False)
        mock_module.return_value = mock_module_object
        mock_connect.return_value = 'Client'
        mock_info.return_value = 'Info'
        mock_server_conf.return_value = 'Configlet'
        mock_port_conf.return_value = 'Port'
        mock_conf_action.return_value = dict()
        cv_server_provision.main()
        self.assertEqual(mock_connect.call_count, 1)
        self.assertEqual(mock_info.call_count, 1)
        # The 'show' action skips the compliance check.
        mock_comp.assert_not_called()
        self.assertEqual(mock_server_conf.call_count, 1)
        self.assertEqual(mock_port_conf.call_count, 1)
        self.assertEqual(mock_conf_action.call_count, 1)
        mock_module_object.fail_json.assert_not_called()
        return_dict = dict(changed=False, switchInfo='Info',
                           switchConfigurable=True, portConfigurable=True,
                           taskCreated=False, taskExecuted=False,
                           taskCompleted=False)
        mock_module_object.exit_json.assert_called_with(**return_dict)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
    def test_main_add_no_auto_run(self, mock_module, mock_connect, mock_info,
                                  mock_comp, mock_server_conf, mock_port_conf,
                                  mock_conf_action):
        ''' Test main good with add action and no auto_run.
        '''
        mock_module_object = Mock()
        mock_module_object.params = dict(action='add', switch_name='eos',
                                         switch_port='3', auto_run=False)
        mock_module.return_value = mock_module_object
        mock_connect.return_value = 'Client'
        mock_info.return_value = 'Info'
        mock_server_conf.return_value = 'Configlet'
        mock_port_conf.return_value = 'Port'
        mock_conf_action.return_value = dict(taskCreated=True)
        cv_server_provision.main()
        self.assertEqual(mock_connect.call_count, 1)
        self.assertEqual(mock_info.call_count, 1)
        self.assertEqual(mock_comp.call_count, 1)
        self.assertEqual(mock_server_conf.call_count, 1)
        self.assertEqual(mock_port_conf.call_count, 1)
        self.assertEqual(mock_conf_action.call_count, 1)
        mock_module_object.fail_json.assert_not_called()
        # auto_run=False: the spawned task is reported but never executed.
        return_dict = dict(changed=False, switchInfo='Info',
                           switchConfigurable=True, portConfigurable=True,
                           taskCreated=True, taskExecuted=False,
                           taskCompleted=False)
        mock_module_object.exit_json.assert_called_with(**return_dict)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.wait_for_task_completion')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_update_task')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
    def test_main_add_auto_run(self, mock_module, mock_connect, mock_info,
                               mock_comp, mock_server_conf, mock_port_conf,
                               mock_conf_action, mock_conf_task, mock_wait):
        ''' Test main good with add and auto_run. Config updated, task created.
        '''
        mock_module_object = Mock()
        mock_module_object.params = dict(action='add', switch_name='eos',
                                         switch_port='3', auto_run=True)
        mock_module.return_value = mock_module_object
        mock_client_object = Mock()
        mock_connect.return_value = mock_client_object
        mock_info.return_value = 'Info'
        mock_server_conf.return_value = 'Configlet'
        mock_port_conf.return_value = 'Port'
        mock_conf_action.return_value = dict(taskCreated=True, changed=True)
        # A task id is found and the wait for its completion succeeds.
        mock_conf_task.return_value = '7'
        mock_wait.return_value = True
        cv_server_provision.main()
        self.assertEqual(mock_connect.call_count, 1)
        self.assertEqual(mock_info.call_count, 1)
        self.assertEqual(mock_comp.call_count, 1)
        self.assertEqual(mock_server_conf.call_count, 1)
        self.assertEqual(mock_port_conf.call_count, 1)
        self.assertEqual(mock_conf_action.call_count, 1)
        self.assertEqual(mock_conf_task.call_count, 1)
        self.assertEqual(mock_wait.call_count, 1)
        mock_module_object.fail_json.assert_not_called()
        return_dict = dict(changed=True, switchInfo='Info', taskId='7',
                           switchConfigurable=True, portConfigurable=True,
                           taskCreated=True, taskExecuted=True,
                           taskCompleted=True)
        mock_module_object.exit_json.assert_called_with(**return_dict)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.wait_for_task_completion')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_update_task')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.configlet_action')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.port_configurable')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.server_configurable_configlet')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_in_compliance')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.connect')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.AnsibleModule')
    def test_main_add_auto_run_no_task(self, mock_module, mock_connect,
                                       mock_info, mock_comp, mock_server_conf,
                                       mock_port_conf, mock_conf_action, mock_conf_task,
                                       mock_wait):
        ''' Test main good with add and auto_run. Config not updated, no task.
        '''
        mock_module_object = Mock()
        mock_module_object.params = dict(action='add', switch_name='eos',
                                         switch_port='3', auto_run=True)
        mock_module.return_value = mock_module_object
        mock_client_object = Mock()
        mock_connect.return_value = mock_client_object
        mock_info.return_value = 'Info'
        mock_server_conf.return_value = 'Configlet'
        mock_port_conf.return_value = 'Port'
        mock_conf_action.return_value = dict(taskCreated=True, changed=False)
        # No task id could be found, so nothing is executed or waited on.
        mock_conf_task.return_value = None
        cv_server_provision.main()
        self.assertEqual(mock_connect.call_count, 1)
        self.assertEqual(mock_info.call_count, 1)
        self.assertEqual(mock_comp.call_count, 1)
        self.assertEqual(mock_server_conf.call_count, 1)
        self.assertEqual(mock_port_conf.call_count, 1)
        self.assertEqual(mock_conf_action.call_count, 1)
        self.assertEqual(mock_conf_task.call_count, 1)
        mock_wait.assert_not_called()
        mock_module_object.fail_json.assert_not_called()
        return_dict = dict(changed=False, switchInfo='Info',
                           switchConfigurable=True, portConfigurable=True,
                           taskCreated=False, taskExecuted=False,
                           taskCompleted=False)
        mock_module_object.exit_json.assert_called_with(**return_dict)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.CvpClient')
    def test_connect_good(self, mock_client):
        ''' Test connect success.
        '''
        module = Mock()
        module.params = dict(host='host', username='username',
                             password='password', protocol='https', port='10')
        connect_mock = Mock()
        mock_client.return_value = connect_mock
        client = cv_server_provision.connect(module)
        self.assertIsInstance(client, Mock)
        self.assertEqual(mock_client.call_count, 1)
        # The host is passed as a single-element list to CvpClient.connect.
        connect_mock.connect.assert_called_once_with(['host'], 'username',
                                                     'password', port='10',
                                                     protocol='https')
        module.fail_json.assert_not_called()
    @patch('ansible.modules.network.cloudvision.cv_server_provision.CvpLoginError',
           new_callable=lambda: MockException)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.CvpClient')
    def test_connect_fail(self, mock_client, mock_exception):
        ''' Test connect failure with login error.
        '''
        module = Mock()
        module.params = dict(host='host', username='username',
                             password='password', protocol='https', port='10')
        module.fail_json.side_effect = SystemExit
        connect_mock = Mock()
        # CvpClient.connect raises the (mocked) CvpLoginError.
        connect_mock.connect.side_effect = mock_exception('Login Error')
        mock_client.return_value = connect_mock
        self.assertRaises(SystemExit, cv_server_provision.connect, module)
        self.assertEqual(connect_mock.connect.call_count, 1)
        module.fail_json.assert_called_once_with(msg='Login Error')
def test_switch_info_good(self):
''' Test switch_info success.
'''
module = Mock()
module.params = dict(switch_name='eos')
module.client.api.get_device_by_name.return_value = dict(fqdn='eos')
info = cv_server_provision.switch_info(module)
self.assertEqual(module.client.api.get_device_by_name.call_count, 1)
self.assertEqual(info['fqdn'], 'eos')
module.fail_json.assert_not_called()
def test_switch_info_no_switch(self):
''' Test switch_info fails.
'''
module = Mock()
module.params = dict(switch_name='eos')
module.client.api.get_device_by_name.return_value = None
info = cv_server_provision.switch_info(module)
self.assertEqual(module.client.api.get_device_by_name.call_count, 1)
self.assertEqual(info, None)
module.fail_json.assert_called_once_with(
msg="Device with name 'eos' does not exist.")
def test_switch_in_compliance_good(self):
''' Test switch_in_compliance good.
'''
module = Mock()
module.client.api.check_compliance.return_value = dict(
complianceCode='0000')
sw_info = dict(key='key', type='type', fqdn='eos')
cv_server_provision.switch_in_compliance(module, sw_info)
self.assertEqual(module.client.api.check_compliance.call_count, 1)
module.fail_json.assert_not_called()
def test_switch_in_compliance_fail(self):
''' Test switch_in_compliance fail.
'''
module = Mock()
module.client.api.check_compliance.return_value = dict(
complianceCode='0001')
sw_info = dict(key='key', type='type', fqdn='eos')
cv_server_provision.switch_in_compliance(module, sw_info)
self.assertEqual(module.client.api.check_compliance.call_count, 1)
module.fail_json.assert_called_with(
msg='Switch eos is not in compliance.'
' Returned compliance code 0001.')
def test_server_configurable_configlet_good(self):
''' Test server_configurable_configlet good.
'''
module = Mock()
module.params = dict(switch_name='eos')
configlets = [dict(name='configlet1', info='line'),
dict(name='eos-server', info='info')]
module.client.api.get_configlets_by_device_id.return_value = configlets
sw_info = dict(key='key', type='type', fqdn='eos')
result = cv_server_provision.server_configurable_configlet(module,
sw_info)
self.assertEqual(module.client.api.get_configlets_by_device_id.call_count, 1)
self.assertIsNotNone(result)
self.assertEqual(result['name'], 'eos-server')
self.assertEqual(result['info'], 'info')
def test_server_configurable_configlet_not_configurable(self):
''' Test server_configurable_configlet fail. No server configlet.
'''
module = Mock()
module.params = dict(switch_name='eos')
configlets = [dict(name='configlet1', info='line'),
dict(name='configlet2', info='info')]
module.client.api.get_configlets_by_device_id.return_value = configlets
sw_info = dict(key='key', type='type', fqdn='eos')
result = cv_server_provision.server_configurable_configlet(module, sw_info)
self.assertEqual(module.client.api.get_configlets_by_device_id.call_count, 1)
self.assertIsNone(result)
def test_server_configurable_configlet_no_configlets(self):
''' Test server_configurable_configlet fail. No switch configlets.
'''
module = Mock()
module.params = dict(switch_name='eos')
module.client.api.get_configlets_by_device_id.return_value = []
sw_info = dict(key='key', type='type', fqdn='eos')
result = cv_server_provision.server_configurable_configlet(module,
sw_info)
self.assertEqual(module.client.api.get_configlets_by_device_id.call_count, 1)
self.assertIsNone(result)
def test_port_configurable_good(self):
''' Test port_configurable user provided switch port in configlet.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', config=config)
result = cv_server_provision.port_configurable(module, configlet)
self.assertTrue(result)
def test_port_configurable_fail(self):
''' Test port_configurable user provided switch port not in configlet.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='2')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', config=config)
result = cv_server_provision.port_configurable(module, configlet)
self.assertFalse(result)
def test_port_configurable_fail_no_config(self):
''' Test port_configurable configlet empty.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='2')
config = ''
configlet = dict(name='eos-server', config=config)
result = cv_server_provision.port_configurable(module, configlet)
self.assertFalse(result)
def test_configlet_action_show_blank_config(self):
''' Test configlet_action show returns current port configuration.
'''
module = Mock()
module.params = dict(action='show', switch_name='eos', switch_port='3')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', key='key', config=config)
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['currentConfigBlock'], 'interface Ethernet3\n!')
module.client.api.update_configlet.assert_not_called()
    @patch('ansible.modules.network.cloudvision.cv_server_provision.config_from_template')
    def test_configlet_action_add_with_task(self, mock_template):
        ''' Test configlet_action add with change updates configlet and adds
        proper info to return data. Including task spawned info.
        '''
        module = Mock()
        module.params = dict(action='add', switch_name='eos', switch_port='3')
        config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
        configlet = dict(name='eos-server', key='key', config=config)
        template_config = ('interface Ethernet3\n description Host eos'
                           ' managed by Ansible and Jinja template\n'
                           ' load-interval 30\n'
                           ' switchport\n'
                           ' switchport mode trunk\n'
                           ' no shutdown\n!')
        mock_template.return_value = template_config
        # 'task initiated' in the API response drives taskCreated=True.
        update_return = dict(data='Configlet eos-server successfully updated'
                                  ' and task initiated.')
        module.client.api.update_configlet.return_value = update_return
        result = cv_server_provision.configlet_action(module, configlet)
        self.assertIsNotNone(result)
        self.assertEqual(result['oldConfigBlock'], 'interface Ethernet3\n!')
        full_config = '!\n' + template_config + '\ninterface Ethernet4\n!'
        self.assertEqual(result['fullConfig'], full_config)
        self.assertEqual(result['updateConfigletResponse'],
                         update_return['data'])
        self.assertTrue(result['changed'])
        self.assertTrue(result['taskCreated'])
        self.assertEqual(module.client.api.update_configlet.call_count, 1)
    @patch('ansible.modules.network.cloudvision.cv_server_provision.config_from_template')
    def test_configlet_action_add_no_task(self, mock_template):
        ''' Test configlet_action add that doesn't change configlet adds proper
        info to return data. Does not including any task info.
        '''
        module = Mock()
        module.params = dict(action='add', switch_name='eos', switch_port='3')
        config = ('!\ninterface Ethernet3\n description test\n'
                  '!\ninterface Ethernet4\n!')
        configlet = dict(name='eos-server', key='key', config=config)
        # Template output matches the existing block, so nothing changes.
        template_config = 'interface Ethernet3\n description test\n!'
        mock_template.return_value = template_config
        update_return = dict(data='Configlet eos-server successfully updated.')
        module.client.api.update_configlet.return_value = update_return
        result = cv_server_provision.configlet_action(module, configlet)
        self.assertIsNotNone(result)
        self.assertEqual(result['oldConfigBlock'],
                         'interface Ethernet3\n description test\n!')
        self.assertEqual(result['fullConfig'], config)
        self.assertEqual(result['updateConfigletResponse'],
                         update_return['data'])
        self.assertNotIn('changed', result)
        self.assertNotIn('taskCreated', result)
        self.assertEqual(module.client.api.update_configlet.call_count, 1)
def test_configlet_action_remove_with_task(self):
''' Test configlet_action remove with change updates configlet and adds
proper info to return data. Including task spawned info.
'''
module = Mock()
module.params = dict(action='remove', switch_name='eos',
switch_port='3')
config = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4\n!')
configlet = dict(name='eos-server', key='key', config=config)
update_return = dict(data='Configlet eos-server successfully updated'
' and task initiated.')
module.client.api.update_configlet.return_value = update_return
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['oldConfigBlock'],
'interface Ethernet3\n description test\n!')
full_config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
self.assertEqual(result['fullConfig'], full_config)
self.assertEqual(result['updateConfigletResponse'],
update_return['data'])
self.assertTrue(result['changed'])
self.assertTrue(result['taskCreated'])
self.assertEqual(module.client.api.update_configlet.call_count, 1)
def test_configlet_action_remove_no_task(self):
''' Test configlet_action with remove that doesn't change configlet and
adds proper info to return data. Does not including any task info.
'''
module = Mock()
module.params = dict(action='remove', switch_name='eos',
switch_port='3')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
configlet = dict(name='eos-server', key='key', config=config)
update_return = dict(data='Configlet eos-server successfully updated.')
module.client.api.update_configlet.return_value = update_return
result = cv_server_provision.configlet_action(module, configlet)
self.assertIsNotNone(result)
self.assertEqual(result['oldConfigBlock'], 'interface Ethernet3\n!')
self.assertEqual(result['fullConfig'], config)
self.assertEqual(result['updateConfigletResponse'],
update_return['data'])
self.assertNotIn('changed', result)
self.assertNotIn('taskCreated', result)
self.assertEqual(module.client.api.update_configlet.call_count, 1)
def test_current_config_empty_config(self):
''' Test current_config with empty config for port
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='4')
config = '!\ninterface Ethernet3\n!\ninterface Ethernet4'
result = cv_server_provision.current_config(module, config)
self.assertIsNotNone(result)
self.assertEqual(result, 'interface Ethernet4')
def test_current_config_with_config(self):
''' Test current_config with config for port
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
config = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4\n!')
result = cv_server_provision.current_config(module, config)
self.assertIsNotNone(result)
self.assertEqual(result, 'interface Ethernet3\n description test\n!')
def test_current_config_no_match(self):
''' Test current_config with no entry for port
'''
module = Mock()
module.fail_json.side_effect = SystemExit
module.params = dict(switch_name='eos', switch_port='2')
config = '!\ninterface Ethernet3\n description test\n!'
self.assertRaises(SystemExit, cv_server_provision.current_config,
module, config)
def test_valid_template_true(self):
''' Test valid_template true
'''
template = 'interface Ethernet3\n description test\n!'
result = cv_server_provision.valid_template('3', template)
self.assertTrue(result)
def test_valid_template_false(self):
''' Test valid_template false
'''
template = 'interface Ethernet3\n description test\n!'
result = cv_server_provision.valid_template('4', template)
self.assertFalse(result)
    @patch('jinja2.DebugUndefined')
    @patch('jinja2.Environment')
    @patch('jinja2.FileSystemLoader')
    def test_config_from_template_no_template(self, mock_file_sys, mock_env,
                                              mock_debug):
        ''' Test config_from_template good. No template.
        '''
        module = Mock()
        module.fail_json.side_effect = SystemExit
        module.params = dict(switch_name='eos', switch_port='3',
                             server_name='new', template='jinja.j2')
        mock_file_sys.return_value = 'file'
        mock_debug.return_value = 'debug'
        env_mock = Mock()
        # get_template returning None simulates a missing template file.
        env_mock.get_template.return_value = None
        mock_env.return_value = env_mock
        self.assertRaises(SystemExit, cv_server_provision.config_from_template,
                          module)
        self.assertEqual(mock_file_sys.call_count, 1)
        self.assertEqual(mock_env.call_count, 1)
        self.assertEqual(module.fail_json.call_count, 1)
    @patch('jinja2.meta.find_undeclared_variables')
    @patch('jinja2.DebugUndefined')
    @patch('jinja2.Environment')
    @patch('jinja2.FileSystemLoader')
    def test_config_from_template_good_no_vlan(self, mock_file_sys, mock_env, mock_debug,
                                               mock_find):
        ''' Test config_from_template good. No port_vlan.
        '''
        module = Mock()
        module.params = dict(switch_name='eos', switch_port='3',
                             server_name='new', template='jinja.j2')
        mock_file_sys.return_value = 'file'
        mock_debug.return_value = 'debug'
        template_mock = Mock()
        template_mock.render.return_value = ('interface Ethernet3\n'
                                             ' description test\n'
                                             ' switchport\n'
                                             ' switchport mode trunk\n'
                                             ' no shutdown\n!')
        env_mock = Mock()
        env_mock.loader.get_source.return_value = ['one', 'two']
        env_mock.parse.return_value = 'parsed'
        env_mock.get_template.return_value = template_mock
        mock_env.return_value = env_mock
        # The template only declares server_name/switch_port, so no vlan
        # parameter is required.
        mock_find.return_value = dict(server_name=None, switch_port=None)
        result = cv_server_provision.config_from_template(module)
        self.assertIsNotNone(result)
        expected = ('interface Ethernet3\n'
                    ' description test\n'
                    ' switchport\n'
                    ' switchport mode trunk\n'
                    ' no shutdown\n!')
        self.assertEqual(result, expected)
        self.assertEqual(mock_file_sys.call_count, 1)
        self.assertEqual(mock_env.call_count, 1)
        module.fail_json.assert_not_called()
    @patch('jinja2.meta.find_undeclared_variables')
    @patch('jinja2.DebugUndefined')
    @patch('jinja2.Environment')
    @patch('jinja2.FileSystemLoader')
    def test_config_from_template_good_vlan(self, mock_file_sys, mock_env, mock_debug,
                                            mock_find):
        ''' Test config_from_template good. With port_vlan.
        '''
        module = Mock()
        module.params = dict(switch_name='eos', switch_port='3',
                             server_name='new', template='jinja.j2', port_vlan='7')
        mock_file_sys.return_value = 'file'
        mock_debug.return_value = 'debug'
        template_mock = Mock()
        template_mock.render.return_value = ('interface Ethernet3\n'
                                             ' description test\n'
                                             ' switchport\n'
                                             ' switchport access vlan 7\n'
                                             ' no shutdown\n!')
        env_mock = Mock()
        env_mock.loader.get_source.return_value = ['one', 'two']
        env_mock.parse.return_value = 'parsed'
        env_mock.get_template.return_value = template_mock
        mock_env.return_value = env_mock
        # The template declares port_vlan and the module supplies one.
        mock_find.return_value = dict(server_name=None, switch_port=None,
                                      port_vlan=None)
        result = cv_server_provision.config_from_template(module)
        self.assertIsNotNone(result)
        expected = ('interface Ethernet3\n'
                    ' description test\n'
                    ' switchport\n'
                    ' switchport access vlan 7\n'
                    ' no shutdown\n!')
        self.assertEqual(result, expected)
        self.assertEqual(mock_file_sys.call_count, 1)
        self.assertEqual(mock_env.call_count, 1)
        module.fail_json.assert_not_called()
    @patch('jinja2.meta.find_undeclared_variables')
    @patch('jinja2.DebugUndefined')
    @patch('jinja2.Environment')
    @patch('jinja2.FileSystemLoader')
    def test_config_from_template_fail_wrong_port(self, mock_file_sys, mock_env,
                                                  mock_debug, mock_find):
        ''' Test config_from_template fail. Wrong port number in template.
        '''
        module = Mock()
        module.params = dict(switch_name='eos', switch_port='4',
                             server_name='new', template='jinja.j2')
        mock_file_sys.return_value = 'file'
        mock_debug.return_value = 'debug'
        template_mock = Mock()
        # The template renders Ethernet3 although port 4 was requested.
        template_mock.render.return_value = ('interface Ethernet3\n'
                                             ' description test\n!')
        env_mock = Mock()
        env_mock.loader.get_source.return_value = ['one', 'two']
        env_mock.parse.return_value = 'parsed'
        env_mock.get_template.return_value = template_mock
        mock_env.return_value = env_mock
        mock_find.return_value = dict(server_name=None, switch_port=None)
        result = cv_server_provision.config_from_template(module)
        # fail_json is mocked without a side effect here, so the function
        # still returns the rendered config after reporting the error.
        self.assertIsNotNone(result)
        expected = 'interface Ethernet3\n description test\n!'
        self.assertEqual(result, expected)
        self.assertEqual(mock_file_sys.call_count, 1)
        self.assertEqual(mock_env.call_count, 1)
        module.fail_json.assert_called_with(msg='Template content does not'
                                                ' configure proper interface'
                                                ' - %s' % expected)
    @patch('jinja2.meta.find_undeclared_variables')
    @patch('jinja2.DebugUndefined')
    @patch('jinja2.Environment')
    @patch('jinja2.FileSystemLoader')
    def test_config_from_template_fail_no_vlan(self, mock_file_sys, mock_env,
                                               mock_debug, mock_find):
        ''' Test config_from_template fail. Template needs vlan but none provided.
        '''
        module = Mock()
        module.params = dict(switch_name='eos', switch_port='3',
                             server_name='new', template='jinja.j2',
                             port_vlan=None)
        mock_file_sys.return_value = 'file'
        mock_debug.return_value = 'debug'
        template_mock = Mock()
        template_mock.render.return_value = ('interface Ethernet3\n'
                                             ' description test\n!')
        env_mock = Mock()
        env_mock.loader.get_source.return_value = ['one', 'two']
        env_mock.parse.return_value = 'parsed'
        env_mock.get_template.return_value = template_mock
        mock_env.return_value = env_mock
        # The template requires port_vlan, but module.params supplies None.
        mock_find.return_value = dict(server_name=None, switch_port=None,
                                      port_vlan=None)
        result = cv_server_provision.config_from_template(module)
        # fail_json is mocked without a side effect, so rendering continues
        # and the rendered config is still returned.
        self.assertIsNotNone(result)
        expected = 'interface Ethernet3\n description test\n!'
        self.assertEqual(result, expected)
        self.assertEqual(mock_file_sys.call_count, 1)
        self.assertEqual(mock_env.call_count, 1)
        module.fail_json.assert_called_with(msg='Template jinja.j2 requires a'
                                                ' vlan. Please re-run with vlan'
                                                ' number provided.')
def test_updated_configlet_content_add(self):
''' Test updated_configlet_content. Add config.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
existing_config = '!\ninterface Ethernet3\n!\ninterface Ethernet4\n!'
new_config_block = 'interface Ethernet3\n description test\n!'
result = cv_server_provision.updated_configlet_content(module,
existing_config,
new_config_block)
expected = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4\n!')
self.assertEqual(result, expected)
module.fail_json.assert_not_called()
def test_updated_configlet_content_remove(self):
''' Test updated_configlet_content. Remove config.
'''
module = Mock()
module.params = dict(switch_name='eos', switch_port='3')
existing_config = ('!\ninterface Ethernet3\n description test\n'
'!\ninterface Ethernet4')
new_config_block = 'interface Ethernet3\n!'
result = cv_server_provision.updated_configlet_content(module,
existing_config,
new_config_block)
expected = '!\ninterface Ethernet3\n!\ninterface Ethernet4'
self.assertEqual(result, expected)
module.fail_json.assert_not_called()
def test_updated_configlet_content_no_match(self):
''' Test updated_configlet_content. Interface not in config.
'''
module = Mock()
module.fail_json.side_effect = SystemExit
module.params = dict(switch_name='eos', switch_port='2')
existing_config = '!\ninterface Ethernet3\n description test\n!'
new_config_block = 'interface Ethernet3\n!'
self.assertRaises(SystemExit,
cv_server_provision.updated_configlet_content,
module, existing_config, new_config_block)
    @patch('time.sleep')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    def test_configlet_update_task_good_one_try(self, mock_info, mock_sleep):
        ''' Test configlet_update_task gets task after one try.
        '''
        module = Mock()
        # A task with a 'Configlet Push' workflow action and 'Configlet
        # Assign' description is the one whose workOrderId is returned.
        task = dict(data=dict(WORKFLOW_ACTION='Configlet Push'),
                    description='Configlet Assign',
                    workOrderId='7')
        device_info = dict(taskIdList=[task])
        mock_info.return_value = device_info
        result = cv_server_provision.configlet_update_task(module)
        self.assertEqual(result, '7')
        # Found on the first poll: no retry sleep, single switch_info call.
        mock_sleep.assert_not_called()
        self.assertEqual(mock_info.call_count, 1)
    @patch('time.sleep')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    def test_configlet_update_task_good_three_tries(self, mock_info, mock_sleep):
        ''' Test configlet_update_task gets task on third try.
        '''
        module = Mock()
        # task1 is the match; task2 shares the description but carries a
        # non-matching workflow action and must be skipped.
        task1 = dict(data=dict(WORKFLOW_ACTION='Configlet Push'),
                     description='Configlet Assign',
                     workOrderId='7')
        task2 = dict(data=dict(WORKFLOW_ACTION='Nonsense'),
                     description='Configlet Assign',
                     workOrderId='700')
        device_info = dict(taskIdList=[task1, task2])
        # First two polls return no taskIdList; the third yields the tasks.
        mock_info.side_effect = [dict(), dict(), device_info]
        result = cv_server_provision.configlet_update_task(module)
        self.assertEqual(result, '7')
        # One sleep after each of the two unsuccessful polls.
        self.assertEqual(mock_sleep.call_count, 2)
        self.assertEqual(mock_info.call_count, 3)
    @patch('time.sleep')
    @patch('ansible.modules.network.cloudvision.cv_server_provision.switch_info')
    def test_configlet_update_task_no_task(self, mock_info, mock_sleep):
        ''' Test configlet_update_task does not get task after three tries.
        '''
        module = Mock()
        # Three consecutive empty polls exhaust the retries; the helper
        # sleeps once per attempt and finally gives up with None.
        mock_info.side_effect = [dict(), dict(), dict()]
        result = cv_server_provision.configlet_update_task(module)
        self.assertIsNone(result)
        self.assertEqual(mock_sleep.call_count, 3)
        self.assertEqual(mock_info.call_count, 3)
@patch('time.sleep')
def test_wait_for_task_completion_good_one_try(self, mock_time):
''' Test wait_for_task_completion completed. One Try.
'''
module = Mock()
module.client.api.get_task_by_id.return_value = dict(
workOrderUserDefinedStatus='Completed')
result = cv_server_provision.wait_for_task_completion(module, '7')
self.assertTrue(result)
self.assertEqual(module.client.api.get_task_by_id.call_count, 1)
module.fail_json.assert_not_called()
mock_time.assert_not_called()
@patch('time.sleep')
def test_wait_for_task_completion_good_three_tries(self, mock_time):
''' Test wait_for_task_completion completed. Three tries.
'''
module = Mock()
try_one_two = dict(workOrderUserDefinedStatus='Pending')
try_three = dict(workOrderUserDefinedStatus='Completed')
module.client.api.get_task_by_id.side_effect = [try_one_two,
try_one_two, try_three]
result = cv_server_provision.wait_for_task_completion(module, '7')
self.assertTrue(result)
self.assertEqual(module.client.api.get_task_by_id.call_count, 3)
module.fail_json.assert_not_called()
self.assertEqual(mock_time.call_count, 2)
    @patch('time.sleep')
    def test_wait_for_task_completion_fail(self, mock_time):
        ''' Test wait_for_task_completion failed.
        '''
        module = Mock()
        # First poll reports Failed, which triggers fail_json with the error
        # text. Because fail_json is a Mock (it does not exit), the helper
        # keeps polling and returns True on the Completed second poll.
        try_one = dict(workOrderUserDefinedStatus='Failed')
        try_two = dict(workOrderUserDefinedStatus='Completed')
        module.client.api.get_task_by_id.side_effect = [try_one, try_two]
        result = cv_server_provision.wait_for_task_completion(module, '7')
        self.assertTrue(result)
        self.assertEqual(module.client.api.get_task_by_id.call_count, 2)
        text = ('Task 7 has reported status Failed. Please consult the CVP'
                ' admins for more information.')
        module.fail_json.assert_called_with(msg=text)
        # One sleep after the Failed poll before re-checking.
        self.assertEqual(mock_time.call_count, 1)
| gpl-3.0 |
wilblack/lilybot | server/backends/__init__.py | 1 | 1198 | class ApiClientBase(object):
"""
A class to help build formstacks
"""
    def __init__(self):
        # Responses with status >= 300 are collected here so callers can
        # review failures afterwards via print_errors().
        self.bad_responses = []
def get(self, resource, data):
uri = "%s%s" %(self.base_uri, resource)
resp = requests.get(uri)
content = json.loads(resp.text)
if resp.status_code >= 300:
print res.text
self.bad_responses.append(resp)
return content
def post(self, resource, data):
uri = "%s%s?access_token=%s" %(self.base_uri, resource, self.token)
resp = requests.post(uri, data=json.dumps(data))
content = json.loads(resp.text)
if resp.status_code >= 300:
print "ERROR: %s" % (resp.status_code)
print resp.text
self.bad_responses.append(resp)
if (content['error'] == 'expired_token'):
self.reset_access_token()
self.post(resource, data)
return content
    def update(self, resource, data):
        # Placeholder: update/PUT support is not implemented yet.
        pass
def print_errors(self):
print "******** ERRROR REPORT ************"
for res in self.bad_responses:
print resp
| gpl-2.0 |
llhe/tensorflow | tensorflow/python/ops/distributions/distribution.py | 26 | 38424 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import tf_inspect
# Public wrapper methods whose docstrings _DistributionMeta augments with the
# docstring of the corresponding underscore-prefixed subclass hook
# (e.g. `log_prob` is augmented from a subclass's `_log_prob`).
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
    "batch_shape_tensor", "batch_shape", "event_shape_tensor", "event_shape",
    "sample", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
    "survival_function", "entropy", "mean", "variance", "stddev", "mode",
    "covariance"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
  """Abstract base class needed for resolving subclass hierarchy."""
  # Sentinel root class: _DistributionMeta skips docstring propagation for
  # classes whose first base is _BaseDistribution (i.e. Distribution itself).
  pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
class _DistributionMeta(abc.ABCMeta):
  # Metaclass that copies/augments public wrapper docstrings from the
  # underscore-prefixed hooks a subclass actually implements.

  def __new__(mcs, classname, baseclasses, attrs):
    """Control the creation of subclasses of the Distribution class.

    The main purpose of this method is to properly propagate docstrings
    from private Distribution methods, like `_log_prob`, into their
    public wrappers as inherited by the Distribution base class
    (e.g. `log_prob`).

    Args:
      classname: The name of the subclass being created.
      baseclasses: A tuple of parent classes.
      attrs: A dict mapping new attributes to their values.

    Returns:
      The class object.

    Raises:
      TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
        the new class is derived via multiple inheritance and the first
        parent class is not a subclass of `BaseDistribution`.
      AttributeError: If `Distribution` does not implement e.g. `log_prob`.
      ValueError: If a `Distribution` public method lacks a docstring.
    """
    if not baseclasses:  # Nothing to be done for Distribution
      raise TypeError("Expected non-empty baseclass. Does Distribution "
                      "not subclass _BaseDistribution?")
    which_base = [
        base for base in baseclasses
        if base == _BaseDistribution or issubclass(base, Distribution)]
    base = which_base[0]
    if base == _BaseDistribution:  # Nothing to be done for Distribution
      return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
    if not issubclass(base, Distribution):
      raise TypeError("First parent class declared for %s must be "
                      "Distribution, but saw '%s'" % (classname, base.__name__))
    for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
      special_attr = "_%s" % attr
      class_attr_value = attrs.get(attr, None)
      if attr in attrs:
        # The method is being overridden, do not update its docstring
        continue
      base_attr_value = getattr(base, attr, None)
      if not base_attr_value:
        raise AttributeError(
            "Internal error: expected base class '%s' to implement method '%s'"
            % (base.__name__, attr))
      class_special_attr_value = attrs.get(special_attr, None)
      if class_special_attr_value is None:
        # No _special method available, no need to update the docstring.
        continue
      class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
      if not class_special_attr_docstring:
        # No docstring to append.
        continue
      # Copy the inherited wrapper so its __doc__ can be edited without
      # mutating the base class's function object.
      class_attr_value = _copy_fn(base_attr_value)
      class_attr_docstring = tf_inspect.getdoc(base_attr_value)
      if class_attr_docstring is None:
        raise ValueError(
            "Expected base class fn to contain a docstring: %s.%s"
            % (base.__name__, attr))
      class_attr_value.__doc__ = _update_docstring(
          class_attr_value.__doc__,
          ("Additional documentation from `%s`:\n\n%s"
           % (classname, class_special_attr_docstring)))
      attrs[attr] = class_attr_value
    return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
class ReparameterizationType(object):
  """Instances of this class represent how sampling is reparameterized.

  Two static instances exist in the distributions library, signifying
  one of two possible properties for samples from a distribution:

  `FULLY_REPARAMETERIZED`: Samples from the distribution are fully
    reparameterized, and straight-through gradients are supported.

  `NOT_REPARAMETERIZED`: Samples from the distribution are not fully
    reparameterized, and straight-through gradients are either partially
    unsupported or are not supported at all. In this case, for purposes of
    e.g. RL or variational inference, it is generally safest to wrap the
    sample results in a `stop_gradients` call and instead use policy
    gradients / surrogate loss instead.
  """

  def __init__(self, rep_type):
    self._rep_type = rep_type

  def __repr__(self):
    # NOTE: "Reparameteriation" (sic) is preserved; callers may compare
    # against the existing repr text.
    return "<Reparameteriation Type: %s>" % self._rep_type

  def __eq__(self, other):
    """Determine if this `ReparameterizationType` is equal to another.

    Since RepaparameterizationType instances are constant static global
    instances, equality checks if two instances' id() values are equal.

    Args:
      other: Object to compare against.

    Returns:
      `self is other`.
    """
    return self is other

  # BUG FIX: in Python 3, defining __eq__ sets __hash__ to None, which made
  # these singletons unhashable (unusable as dict keys / set members).
  # Identity hashing is exactly consistent with the identity-based __eq__.
  __hash__ = object.__hash__
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
# These singletons compare by identity (see ReparameterizationType.__eq__),
# so always test against the module-level instances.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")


# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
  def __init__(self,
               dtype,
               reparameterization_type,
               validate_args,
               allow_nan_stats,
               parameters=None,
               graph_parents=None,
               name=None):
    """Constructs the `Distribution`.

    **This is a private method for subclass use.**

    Args:
      dtype: The type of the event samples. `None` implies no type-enforcement.
      reparameterization_type: Instance of `ReparameterizationType`.
        If `distributions.FULLY_REPARAMETERIZED`, this
        `Distribution` can be reparameterized in terms of some standard
        distribution with a function whose Jacobian is constant for the support
        of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
        then no such reparameterization is available.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      parameters: Python `dict` of parameters used to instantiate this
        `Distribution`.
      graph_parents: Python `list` of graph prerequisites of this
        `Distribution`.
      name: Python `str` name prefixed to Ops created by this class. Default:
        subclass name.

    Raises:
      ValueError: if any member of graph_parents is `None` or not a `Tensor`.
    """
    graph_parents = [] if graph_parents is None else graph_parents
    # Fail fast on malformed graph dependencies rather than at first use.
    for i, t in enumerate(graph_parents):
      if t is None or not tensor_util.is_tensor(t):
        raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
    self._dtype = dtype
    self._reparameterization_type = reparameterization_type
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    self._parameters = parameters or {}
    self._graph_parents = graph_parents
    # Default the name-scope prefix to the concrete subclass's name.
    self._name = name or type(self).__name__
  @classmethod
  def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
    """Shapes of parameters given the desired shape of a call to `sample()`.

    This is a class method that describes what key/value arguments are required
    to instantiate the given `Distribution` so that a particular shape is
    returned for that instance's call to `sample()`.

    Subclasses should override class method `_param_shapes`.

    Args:
      sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
        `sample()`.
      name: name to prepend ops with.

    Returns:
      `dict` of parameter name to `Tensor` shapes.
    """
    # Thin wrapper: adds a name scope around the subclass hook.
    with ops.name_scope(name, values=[sample_shape]):
      return cls._param_shapes(sample_shape)
  @classmethod
  def param_static_shapes(cls, sample_shape):
    """param_shapes with static (i.e. `TensorShape`) shapes.

    This is a class method that describes what key/value arguments are required
    to instantiate the given `Distribution` so that a particular shape is
    returned for that instance's call to `sample()`. Assumes that the sample's
    shape is known statically.

    Subclasses should override class method `_param_shapes` to return
    constant-valued tensors when constant values are fed.

    Args:
      sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
        to `sample()`.

    Returns:
      `dict` of parameter name to `TensorShape`.

    Raises:
      ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
    """
    # Normalize a TensorShape input to a plain Python list.
    if isinstance(sample_shape, tensor_shape.TensorShape):
      if not sample_shape.is_fully_defined():
        raise ValueError("TensorShape sample_shape must be fully defined")
      sample_shape = sample_shape.as_list()

    params = cls.param_shapes(sample_shape)

    static_params = {}
    for name, shape in params.items():
      # Each dynamic shape tensor must be constant-foldable to yield a
      # static TensorShape.
      static_shape = tensor_util.constant_value(shape)
      if static_shape is None:
        raise ValueError(
            "sample_shape must be a fully-defined TensorShape or list/tuple")
      static_params[name] = tensor_shape.TensorShape(static_shape)

    return static_params
  @staticmethod
  def _param_shapes(sample_shape):
    # Subclass hook backing param_shapes / param_static_shapes.
    raise NotImplementedError("_param_shapes not implemented")
  @property
  def name(self):
    """Name prepended to all ops created by this `Distribution`."""
    # Set in __init__; defaults to the subclass name when not provided.
    return self._name
  @property
  def dtype(self):
    """The `DType` of `Tensor`s handled by this `Distribution`."""
    # May be None, which means no type enforcement (see __init__ docstring).
    return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used `parameters = locals()`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
  @property
  def reparameterization_type(self):
    """Describes how samples from the distribution are reparameterized.

    Currently this is one of the static instances
    `distributions.FULLY_REPARAMETERIZED`
    or `distributions.NOT_REPARAMETERIZED`.

    Returns:
      An instance of `ReparameterizationType`.
    """
    # Compare the returned value by identity (see ReparameterizationType).
    return self._reparameterization_type
  @property
  def allow_nan_stats(self):
    """Python `bool` describing behavior when a stat is undefined.

    Stats return +/- infinity when it makes sense. E.g., the variance of a
    Cauchy distribution is infinity. However, sometimes the statistic is
    undefined, e.g., if a distribution's pdf does not achieve a maximum within
    the support of the distribution, the mode is undefined. If the mean is
    undefined, then by definition the variance is undefined. E.g. the mean for
    Student's T for df = 1 is undefined (no clear way to say it is either + or -
    infinity), so the variance = E[(X - mean)**2] is also undefined.

    Returns:
      allow_nan_stats: Python `bool`.
    """
    # Fixed at construction time; see __init__.
    return self._allow_nan_stats
  @property
  def validate_args(self):
    """Python `bool` indicating possibly expensive checks are enabled."""
    # Fixed at construction time; see __init__.
    return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
  def _batch_shape_tensor(self):
    # Subclass hook: dynamic fallback used by batch_shape_tensor when the
    # static batch shape is not fully defined.
    raise NotImplementedError("batch_shape_tensor is not implemented")
  def batch_shape_tensor(self, name="batch_shape_tensor"):
    """Shape of a single sample from a single event index as a 1-D `Tensor`.

    The batch dimensions are indexes into independent, non-identical
    parameterizations of this distribution.

    Args:
      name: name to give to the op

    Returns:
      batch_shape: `Tensor`.
    """
    with self._name_scope(name):
      # Prefer the statically known shape: emit a constant when fully
      # defined, only falling back to the dynamic subclass hook otherwise.
      if self.batch_shape.is_fully_defined():
        return ops.convert_to_tensor(self.batch_shape.as_list(),
                                     dtype=dtypes.int32,
                                     name="batch_shape")
      return self._batch_shape_tensor()
  def _batch_shape(self):
    # Default: statically unknown shape; subclasses override when they can
    # report a (possibly partial) static batch shape.
    return tensor_shape.TensorShape(None)
  @property
  def batch_shape(self):
    """Shape of a single sample from a single event index as a `TensorShape`.

    May be partially defined or unknown.

    The batch dimensions are indexes into independent, non-identical
    parameterizations of this distribution.

    Returns:
      batch_shape: `TensorShape`, possibly unknown.
    """
    # Delegates to the subclass hook `_batch_shape`.
    return self._batch_shape()
  def _event_shape_tensor(self):
    # Subclass hook: dynamic fallback used by event_shape_tensor when the
    # static event shape is not fully defined.
    raise NotImplementedError("event_shape_tensor is not implemented")
  def event_shape_tensor(self, name="event_shape_tensor"):
    """Shape of a single sample from a single batch as a 1-D int32 `Tensor`.

    Args:
      name: name to give to the op

    Returns:
      event_shape: `Tensor`.
    """
    with self._name_scope(name):
      # Prefer the statically known shape; fall back to the dynamic hook.
      if self.event_shape.is_fully_defined():
        return ops.convert_to_tensor(self.event_shape.as_list(),
                                     dtype=dtypes.int32,
                                     name="event_shape")
      return self._event_shape_tensor()
  def _event_shape(self):
    # Default: statically unknown shape; subclasses override when they can
    # report a (possibly partial) static event shape.
    return tensor_shape.TensorShape(None)
  @property
  def event_shape(self):
    """Shape of a single sample from a single batch as a `TensorShape`.

    May be partially defined or unknown.

    Returns:
      event_shape: `TensorShape`, possibly unknown.
    """
    # Delegates to the subclass hook `_event_shape`.
    return self._event_shape()
  def is_scalar_event(self, name="is_scalar_event"):
    """Indicates that `event_shape == []`.

    Args:
      name: The name to give this op.

    Returns:
      is_scalar_event: `bool` scalar `Tensor`.
    """
    with self._name_scope(name):
      # _is_scalar_helper is defined elsewhere in this class (outside this
      # excerpt); it checks the static shape first, then the dynamic one.
      return ops.convert_to_tensor(
          self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
          name="is_scalar_event")
  def is_scalar_batch(self, name="is_scalar_batch"):
    """Indicates that `batch_shape == []`.

    Args:
      name: The name to give this op.

    Returns:
      is_scalar_batch: `bool` scalar `Tensor`.
    """
    with self._name_scope(name):
      # Same helper as is_scalar_event, applied to the batch shape.
      return ops.convert_to_tensor(
          self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
          name="is_scalar_batch")
  def _sample_n(self, n, seed=None):
    # Subclass hook: draw `n` samples, returning a Tensor with shape
    # [n] + batch_shape + event_shape (see the class docstring).
    raise NotImplementedError("sample_n is not implemented")
  def _call_sample_n(self, sample_shape, seed, name, **kwargs):
    # Common driver for sample(): flattens the requested sample_shape into a
    # single count n, draws the samples, then restores the requested leading
    # dimensions.
    with self._name_scope(name, values=[sample_shape]):
      sample_shape = ops.convert_to_tensor(
          sample_shape, dtype=dtypes.int32, name="sample_shape")
      # `n` is the flat total number of samples; `sample_shape` is the
      # normalized 1-D prefix requested by the caller.
      sample_shape, n = self._expand_sample_shape_to_vector(
          sample_shape, "sample_shape")
      samples = self._sample_n(n, seed, **kwargs)
      # _sample_n returns [n] + batch_shape + event_shape; fold the flat
      # leading dim back into the caller's sample_shape.
      batch_event_shape = array_ops.shape(samples)[1:]
      final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
      samples = array_ops.reshape(samples, final_shape)
      samples = self._set_sample_static_shape(samples, sample_shape)
      return samples
  def sample(self, sample_shape=(), seed=None, name="sample"):
    """Generate samples of the specified shape.

    Note that a call to `sample()` without arguments will generate a single
    sample.

    Args:
      sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
      seed: Python integer seed for RNG
      name: name to give to the op.

    Returns:
      samples: a `Tensor` with prepended dimensions `sample_shape`.
    """
    # Thin wrapper over the shared shape-handling driver.
    return self._call_sample_n(sample_shape, seed, name)
  def _log_prob(self, value):
    # Subclass hook; log_prob falls back to log(_prob) when only the
    # sibling hook is implemented.
    raise NotImplementedError("log_prob is not implemented")
  def _call_log_prob(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._log_prob(value, **kwargs)
      except NotImplementedError as original_exception:
        # Fall back to log(prob) when only _prob is implemented; if neither
        # hook exists, surface the original error.
        try:
          return math_ops.log(self._prob(value, **kwargs))
        except NotImplementedError:
          raise original_exception
  def log_prob(self, value, name="log_prob"):
    """Log probability density/mass function.

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
        values of type `self.dtype`.
    """
    # Dispatch through the driver so the log(_prob) fallback applies.
    return self._call_log_prob(value, name)
  def _prob(self, value):
    # Subclass hook; prob falls back to exp(_log_prob) when only the
    # sibling hook is implemented.
    raise NotImplementedError("prob is not implemented")
  def _call_prob(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._prob(value, **kwargs)
      except NotImplementedError as original_exception:
        # Fall back to exp(log_prob) when only _log_prob is implemented; if
        # neither hook exists, surface the original error.
        try:
          return math_ops.exp(self._log_prob(value, **kwargs))
        except NotImplementedError:
          raise original_exception
  def prob(self, value, name="prob"):
    """Probability density/mass function.

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
        values of type `self.dtype`.
    """
    # Dispatch through the driver so the exp(_log_prob) fallback applies.
    return self._call_prob(value, name)
  def _log_cdf(self, value):
    # Subclass hook; log_cdf falls back to log(_cdf) when only the sibling
    # hook is implemented.
    raise NotImplementedError("log_cdf is not implemented")
  def _call_log_cdf(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._log_cdf(value, **kwargs)
      except NotImplementedError as original_exception:
        # Fall back to log(cdf) when only _cdf is implemented; if neither
        # hook exists, surface the original error.
        try:
          return math_ops.log(self._cdf(value, **kwargs))
        except NotImplementedError:
          raise original_exception
  def log_cdf(self, value, name="log_cdf"):
    """Log cumulative distribution function.

    Given random variable `X`, the cumulative distribution function `cdf` is:

    ```none
    log_cdf(x) := Log[ P[X <= x] ]
    ```

    Often, a numerical approximation can be used for `log_cdf(x)` that yields
    a more accurate answer than simply taking the logarithm of the `cdf` when
    `x << -1`.

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
        values of type `self.dtype`.
    """
    # Dispatch through the driver so the log(_cdf) fallback applies.
    return self._call_log_cdf(value, name)
  def _cdf(self, value):
    # Subclass hook; cdf falls back to exp(_log_cdf) when only the sibling
    # hook is implemented.
    raise NotImplementedError("cdf is not implemented")
  def _call_cdf(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._cdf(value, **kwargs)
      except NotImplementedError as original_exception:
        # Fall back to exp(log_cdf) when only _log_cdf is implemented; if
        # neither hook exists, surface the original error.
        try:
          return math_ops.exp(self._log_cdf(value, **kwargs))
        except NotImplementedError:
          raise original_exception
  def cdf(self, value, name="cdf"):
    """Cumulative distribution function.

    Given random variable `X`, the cumulative distribution function `cdf` is:

    ```none
    cdf(x) := P[X <= x]
    ```

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
        values of type `self.dtype`.
    """
    # Dispatch through the driver so the exp(_log_cdf) fallback applies.
    return self._call_cdf(value, name)
  def _log_survival_function(self, value):
    # Subclass hook; log_survival_function falls back to log1p(-cdf).
    raise NotImplementedError("log_survival_function is not implemented")
  def _call_log_survival_function(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._log_survival_function(value, **kwargs)
      except NotImplementedError as original_exception:
        # Fall back to log1p(-cdf); note this goes through the *public*
        # cdf, so cdf's own exp(log_cdf) fallback also applies here.
        try:
          return math_ops.log1p(-self.cdf(value, **kwargs))
        except NotImplementedError:
          raise original_exception
  def log_survival_function(self, value, name="log_survival_function"):
    """Log survival function.

    Given random variable `X`, the survival function is defined:

    ```none
    log_survival_function(x) = Log[ P[X > x] ]
                             = Log[ 1 - P[X <= x] ]
                             = Log[ 1 - cdf(x) ]
    ```

    Typically, different numerical approximations can be used for the log
    survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
        `self.dtype`.
    """
    # Dispatch through the driver so the log1p(-cdf) fallback applies.
    return self._call_log_survival_function(value, name)
  def _survival_function(self, value):
    # Subclass hook; survival_function falls back to 1 - cdf.
    raise NotImplementedError("survival_function is not implemented")
  def _call_survival_function(self, value, name, **kwargs):
    with self._name_scope(name, values=[value]):
      value = ops.convert_to_tensor(value, name="value")
      try:
        return self._survival_function(value, **kwargs)
      except NotImplementedError as original_exception:
        # Fall back to 1 - cdf; as in the log variant, this routes through
        # the public cdf so its fallback chain also applies.
        try:
          return 1. - self.cdf(value, **kwargs)
        except NotImplementedError:
          raise original_exception
def survival_function(self, value, name="survival_function"):
    """Survival function.

    For the random variable `X` this computes:

    ```none
    survival_function(x) = P[X > x]
                         = 1 - P[X <= x]
                         = 1 - cdf(x).
    ```

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of
      type `self.dtype`.
    """
    return self._call_survival_function(value, name)
def _entropy(self):
    """Subclass hook for `entropy`; raises until overridden."""
    raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
    """Shannon entropy in nats."""
    # Thin public wrapper: establish the op name scope, then delegate to
    # the subclass-provided `_entropy`.
    with self._name_scope(name):
        return self._entropy()
def _mean(self):
    """Subclass hook for `mean`; raises until overridden."""
    raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
    """Mean."""
    # Thin public wrapper around the subclass-provided `_mean`.
    with self._name_scope(name):
        return self._mean()
def _quantile(self, value):
    """Subclass hook for `quantile`; raises until overridden."""
    raise NotImplementedError("quantile is not implemented")
def _call_quantile(self, value, name, **kwargs):
    """Converts `value` to a tensor inside a name scope and evaluates the
    subclass-provided `_quantile`.

    Unlike the other `_call_*` wrappers in this class there is no alternate
    formula to fall back to, so the previous
    `try/except NotImplementedError as e: raise e` was dead code and has
    been removed; `_quantile`'s `NotImplementedError` propagates directly.
    """
    with self._name_scope(name, values=[value]):
        value = ops.convert_to_tensor(value, name="value")
        return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
    """Quantile function. Aka "inverse cdf" or "percent point function".

    For the random variable `X` and `p in [0, 1]`:

    ```none
    quantile(p) := x such that P[X <= x] == p
    ```

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape`
        with values of type `self.dtype`.
    """
    return self._call_quantile(value, name)
def _variance(self):
    """Subclass hook for `variance`; raises until overridden."""
    raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
    """Variance.

    Defined as

    ```none
    Var = E[(X - E[X])**2]
    ```

    where `X` is the random variable associated with this distribution, `E`
    denotes expectation, and `Var.shape = batch_shape + event_shape`.

    Args:
      name: The name to give this op.

    Returns:
      variance: Floating-point `Tensor` with shape identical to
        `batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
    """
    with self._name_scope(name):
        try:
            return self._variance()
        except NotImplementedError as primary_error:
            # Derive from the standard deviation when no direct
            # implementation exists.
            try:
                return math_ops.square(self._stddev())
            except NotImplementedError:
                raise primary_error
def _stddev(self):
    """Subclass hook for `stddev`; raises until overridden."""
    raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
    """Standard deviation.

    Defined as

    ```none
    stddev = E[(X - E[X])**2]**0.5
    ```

    where `X` is the random variable associated with this distribution, `E`
    denotes expectation, and `stddev.shape = batch_shape + event_shape`.

    Args:
      name: The name to give this op.

    Returns:
      stddev: Floating-point `Tensor` with shape identical to
        `batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
    """
    with self._name_scope(name):
        try:
            return self._stddev()
        except NotImplementedError as primary_error:
            # Derive from the variance when no direct implementation exists.
            try:
                return math_ops.sqrt(self._variance())
            except NotImplementedError:
                raise primary_error
def _covariance(self):
    """Subclass hook for `covariance`; raises until overridden."""
    raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
    """Covariance.

    (Possibly) defined only for non-scalar-event distributions. For a
    length-`k` vector-valued distribution it is the (batch of) `k x k`
    matrices

    ```none
    Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
    ```

    with `0 <= (i, j) < k` and `E` denoting expectation.

    For non-vector multivariate distributions (e.g., matrix-valued,
    Wishart), `Covariance` shall return a (batch of) matrices under some
    vectorization `Vec` of the events, i.e.,

    ```none
    Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
    ```

    where `Cov` is a (batch of) `k' x k'` matrices with
    `0 <= (i, j) < k' = reduce_prod(event_shape)`.

    Args:
      name: The name to give this op.

    Returns:
      covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
        where the first `n` dimensions are batch coordinates and
        `k' = reduce_prod(self.event_shape)`.
    """
    with self._name_scope(name):
        return self._covariance()
def _mode(self):
    """Subclass hook for `mode`; raises until overridden."""
    raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
    """Mode."""
    # Thin public wrapper around the subclass-provided `_mode`.
    with self._name_scope(name):
        return self._mode()
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
    """Helper function to standardize op scope.

    Nests the per-op scope inside the distribution's own name scope so
    every op created by the public methods is grouped under `self.name`.
    `self._graph_parents` is always appended to `values` so the scope is
    tied to the distribution's parent tensors as well.
    """
    with ops.name_scope(self.name):
        with ops.name_scope(name, values=(
            ([] if values is None else values) + self._graph_parents)) as scope:
            yield scope
def _expand_sample_shape_to_vector(self, x, name):
    """Helper to `sample` which ensures input is 1D.

    Returns a pair `(x, prod)` where `x` is a rank-1 shape tensor and
    `prod` is the product of its entries (the total number of samples).
    The product is computed statically with numpy when the value of `x`
    is known at graph-construction time, dynamically otherwise.
    """
    x_static_val = tensor_util.constant_value(x)
    if x_static_val is None:
        prod = math_ops.reduce_prod(x)
    else:
        prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
    ndims = x.get_shape().ndims  # != sample_ndims
    if ndims is None:
        # Maybe expand_dims: rank unknown statically, so decide at runtime
        # whether a scalar needs to be promoted to a length-1 vector.
        ndims = array_ops.rank(x)
        expanded_shape = util.pick_vector(
            math_ops.equal(ndims, 0),
            np.array([1], dtype=np.int32), array_ops.shape(x))
        x = array_ops.reshape(x, expanded_shape)
    elif ndims == 0:
        # Definitely expand_dims.
        if x_static_val is not None:
            x = ops.convert_to_tensor(
                np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
                name=name)
        else:
            x = array_ops.reshape(x, [1])
    elif ndims != 1:
        raise ValueError("Input is neither scalar nor vector.")
    return x, prod
def _set_sample_static_shape(self, x, sample_shape):
    """Helper to `sample`; sets static shape info.

    The layout convention used below is
    `x.shape = sample_shape + batch_shape + event_shape`; each section
    fills in whichever portion of `x`'s static shape can be deduced from
    the statically-known pieces, via `merge_with`.
    """
    # Set shape hints.
    sample_shape = tensor_shape.TensorShape(
        tensor_util.constant_value(sample_shape))
    ndims = x.get_shape().ndims
    sample_ndims = sample_shape.ndims
    batch_ndims = self.batch_shape.ndims
    event_ndims = self.event_shape.ndims
    # Infer rank(x): only possible when all three section ranks are known.
    if (ndims is None and
        sample_ndims is not None and
        batch_ndims is not None and
        event_ndims is not None):
        ndims = sample_ndims + batch_ndims + event_ndims
        x.set_shape([None] * ndims)
    # Infer sample shape (leading dimensions of x).
    if ndims is not None and sample_ndims is not None:
        shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
        x.set_shape(x.get_shape().merge_with(shape))
    # Infer event shape (trailing dimensions of x).
    if ndims is not None and event_ndims is not None:
        shape = tensor_shape.TensorShape(
            [None]*(ndims - event_ndims)).concatenate(self.event_shape)
        x.set_shape(x.get_shape().merge_with(shape))
    # Infer batch shape (middle dimensions); derive the missing section
    # rank first when only one of sample/event rank is unknown.
    if batch_ndims is not None:
        if ndims is not None:
            if sample_ndims is None and event_ndims is not None:
                sample_ndims = ndims - batch_ndims - event_ndims
            elif event_ndims is None and sample_ndims is not None:
                event_ndims = ndims - batch_ndims - sample_ndims
        if sample_ndims is not None and event_ndims is not None:
            shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
                self.batch_shape).concatenate([None]*event_ndims)
            x.set_shape(x.get_shape().merge_with(shape))
    return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
    """Implementation for `is_scalar_batch` and `is_scalar_event`.

    Prefers the statically-known rank; falls back to a graph-time check
    on the length of the dynamic shape vector only when the static rank
    is unavailable. May therefore return either a Python `bool` or a
    boolean `Tensor`.
    """
    if static_shape.ndims is not None:
        return static_shape.ndims == 0
    shape = dynamic_shape_fn()
    if (shape.get_shape().ndims is not None and
        shape.get_shape()[0].value is not None):
        # If the static_shape is correctly written then we should never execute
        # this branch. We keep it just in case there's some unimagined corner
        # case.
        return shape.get_shape().as_list() == [0]
    return math_ops.equal(array_ops.shape(shape)[0], 0)
| apache-2.0 |
nhenezi/kuma | vendor/packages/python-dateutil/dateutil/rrule.py | 254 | 40402 | """
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import itertools
import datetime
import calendar
import thread
import sys
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
# M366MASK / M365MASK map a day-of-year index to its month number (1-12).
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
# MDAY masks map a day-of-year index to its (positive) day-of-month.
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
# NMDAY masks map a day-of-year index to its negative day-of-month
# (counted back from the end of the month).
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
# Cumulative day counts at each month boundary (leap / non-leap year).
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
# Build the non-leap variants from the leap ones by deleting the Feb 29
# entries (index 59 in the forward masks; index 31, Feb's -29, in NMDAY).
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
# Recurrence frequency constants, from longest to shortest period.
(YEARLY,
 MONTHLY,
 WEEKLY,
 DAILY,
 HOURLY,
 MINUTELY,
 SECONDLY) = range(7)
# Imported on demand.
easter = None
parser = None
class weekday(object):
    """A weekday, optionally qualified by an occurrence index `n`.

    `weekday(0)` is Monday. `MO(+2)` means "the 2nd Monday" and `MO(-1)`
    "the last Monday" within the recurrence period; `n is None` means
    "every Monday".
    """
    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        # "The 0th Monday" is meaningless; only None, positive or negative
        # occurrence indices are accepted.
        # (raise-statement converted to call form: identical behavior, and
        # valid syntax on both Python 2 and 3.)
        if n == 0:
            raise ValueError("Can't create weekday with n == 0")
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        """Return this weekday qualified with index `n` (MO(+1) style)."""
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            return False
        return True

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive `!=` from `__eq__`, so without
        # this, `!=` silently fell back to identity comparison and could
        # disagree with `==`.
        return not self.__eq__(other)

    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)
# Shared singleton weekday instances (no occurrence index); `weekdays`
# keeps them all in order for indexed lookup.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase:
    """Common iteration machinery shared by `rrule` and `rruleset`.

    Subclasses provide `_iter()`, a generator of datetimes in ascending
    order. This base adds optional result caching plus the positional
    helpers `count`, `before`, `after` and `between`.
    """

    def __init__(self, cache=False):
        if cache:
            # The cache is filled lazily from one shared generator; the lock
            # serializes fills when several iterators advance concurrently.
            self._cache = []
            self._cache_lock = thread.allocate_lock()
            self._cache_gen = self._iter()
            self._cache_complete = False
        else:
            self._cache = None
            self._cache_complete = False
        # Total number of occurrences; only known once fully iterated.
        self._len = None

    def __iter__(self):
        if self._cache_complete:
            return iter(self._cache)
        elif self._cache is None:
            return self._iter()
        else:
            return self._iter_cached()

    def _iter_cached(self):
        """Yield from the cache, extending it 10 items at a time on demand.

        NOTE(review): on the `break` paths out of the fill section the lock
        acquired above is never released — looks like a latent deadlock if
        another iterator later tries to fill; confirm before relying on
        cached iteration from multiple threads.
        """
        i = 0
        gen = self._cache_gen
        cache = self._cache
        acquire = self._cache_lock.acquire
        release = self._cache_lock.release
        while gen:
            if i == len(cache):
                acquire()
                if self._cache_complete:
                    break
                try:
                    for j in range(10):
                        cache.append(gen.next())
                except StopIteration:
                    self._cache_gen = gen = None
                    self._cache_complete = True
                    break
                release()
            yield cache[i]
            i += 1
        # Another iterator may have finished the cache; drain the remainder.
        while i < self._len:
            yield cache[i]
            i += 1

    def __getitem__(self, item):
        if self._cache_complete:
            return self._cache[item]
        elif isinstance(item, slice):
            if item.step and item.step < 0:
                # Negative step needs the full materialized list.
                return list(iter(self))[item]
            else:
                return list(itertools.islice(self,
                                             item.start or 0,
                                             item.stop or sys.maxint,
                                             item.step or 1))
        elif item >= 0:
            gen = iter(self)
            try:
                for i in range(item+1):
                    res = gen.next()
            except StopIteration:
                raise IndexError
            return res
        else:
            # Negative index: must materialize to count from the end.
            return list(iter(self))[item]

    def __contains__(self, item):
        if self._cache_complete:
            return item in self._cache
        else:
            # Occurrences are generated in ascending order, so we can stop
            # as soon as we pass `item`.
            for i in self:
                if i == item:
                    return True
                elif i > item:
                    return False
        return False

    # __len__() introduces a large performance penality.
    def count(self):
        """Return the total number of occurrences (exhausts the rule)."""
        if self._len is None:
            for x in self: pass
        return self._len

    def before(self, dt, inc=False):
        """Return the last occurrence before `dt` (or <= `dt` if `inc`)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        last = None
        if inc:
            for i in gen:
                if i > dt:
                    break
                last = i
        else:
            for i in gen:
                if i >= dt:
                    break
                last = i
        return last

    def after(self, dt, inc=False):
        """Return the first occurrence after `dt` (or >= `dt` if `inc`)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        if inc:
            for i in gen:
                if i >= dt:
                    return i
        else:
            for i in gen:
                if i > dt:
                    return i
        return None

    def between(self, after, before, inc=False):
        """Return all occurrences between `after` and `before`.

        Endpoints are excluded unless `inc` is true, in which case both
        are included.
        """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        started = False
        l = []
        if inc:
            for i in gen:
                if i > before:
                    break
                elif not started:
                    if i >= after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        else:
            for i in gen:
                if i >= before:
                    break
                elif not started:
                    if i > after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        return l
class rrule(rrulebase):
    """A single recurrence rule in the spirit of RFC 2445 (iCalendar).

    `__init__` normalizes every by-* argument to an internal tuple (or
    None); `_iter` lazily generates the occurrences, one candidate period
    at a time, using the precomputed day masks in `_iterinfo`.
    """

    def __init__(self, freq, dtstart=None,
                 interval=1, wkst=None, count=None, until=None, bysetpos=None,
                 bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
                 byweekno=None, byweekday=None,
                 byhour=None, byminute=None, bysecond=None,
                 cache=False):
        rrulebase.__init__(self, cache)
        global easter
        # dtstart defaults to "now" (seconds precision); plain dates are
        # promoted to datetimes at midnight.
        if not dtstart:
            dtstart = datetime.datetime.now().replace(microsecond=0)
        elif not isinstance(dtstart, datetime.datetime):
            dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
        else:
            dtstart = dtstart.replace(microsecond=0)
        self._dtstart = dtstart
        self._tzinfo = dtstart.tzinfo
        self._freq = freq
        self._interval = interval
        self._count = count
        if until and not isinstance(until, datetime.datetime):
            until = datetime.datetime.fromordinal(until.toordinal())
        self._until = until
        if wkst is None:
            self._wkst = calendar.firstweekday()
        elif type(wkst) is int:
            self._wkst = wkst
        else:
            self._wkst = wkst.weekday
        # bysetpos: position(s) within each period's occurrence set.
        if bysetpos is None:
            self._bysetpos = None
        elif type(bysetpos) is int:
            if bysetpos == 0 or not (-366 <= bysetpos <= 366):
                raise ValueError("bysetpos must be between 1 and 366, "
                                 "or between -366 and -1")
            self._bysetpos = (bysetpos,)
        else:
            self._bysetpos = tuple(bysetpos)
            for pos in self._bysetpos:
                if pos == 0 or not (-366 <= pos <= 366):
                    raise ValueError("bysetpos must be between 1 and 366, "
                                     "or between -366 and -1")
        # When no explicit day selector was given, dtstart fixes the
        # missing pieces (RFC 2445 semantics).
        if not (byweekno or byyearday or bymonthday or
                byweekday is not None or byeaster is not None):
            if freq == YEARLY:
                if not bymonth:
                    bymonth = dtstart.month
                bymonthday = dtstart.day
            elif freq == MONTHLY:
                bymonthday = dtstart.day
            elif freq == WEEKLY:
                byweekday = dtstart.weekday()
        # bymonth
        if not bymonth:
            self._bymonth = None
        elif type(bymonth) is int:
            self._bymonth = (bymonth,)
        else:
            self._bymonth = tuple(bymonth)
        # byyearday
        if not byyearday:
            self._byyearday = None
        elif type(byyearday) is int:
            self._byyearday = (byyearday,)
        else:
            self._byyearday = tuple(byyearday)
        # byeaster (the easter module is imported lazily on first use)
        if byeaster is not None:
            if not easter:
                from dateutil import easter
            if type(byeaster) is int:
                self._byeaster = (byeaster,)
            else:
                self._byeaster = tuple(byeaster)
        else:
            self._byeaster = None
        # bymonthday, split into positive and negative day numbers
        if not bymonthday:
            self._bymonthday = ()
            self._bynmonthday = ()
        elif type(bymonthday) is int:
            if bymonthday < 0:
                self._bynmonthday = (bymonthday,)
                self._bymonthday = ()
            else:
                self._bymonthday = (bymonthday,)
                self._bynmonthday = ()
        else:
            self._bymonthday = tuple([x for x in bymonthday if x > 0])
            self._bynmonthday = tuple([x for x in bymonthday if x < 0])
        # byweekno
        if byweekno is None:
            self._byweekno = None
        elif type(byweekno) is int:
            self._byweekno = (byweekno,)
        else:
            self._byweekno = tuple(byweekno)
        # byweekday / bynweekday: plain weekdays vs. "nth weekday" pairs.
        # Occurrence indices are only meaningful up to MONTHLY frequency.
        if byweekday is None:
            self._byweekday = None
            self._bynweekday = None
        elif type(byweekday) is int:
            self._byweekday = (byweekday,)
            self._bynweekday = None
        elif hasattr(byweekday, "n"):
            if not byweekday.n or freq > MONTHLY:
                self._byweekday = (byweekday.weekday,)
                self._bynweekday = None
            else:
                self._bynweekday = ((byweekday.weekday, byweekday.n),)
                self._byweekday = None
        else:
            self._byweekday = []
            self._bynweekday = []
            for wday in byweekday:
                if type(wday) is int:
                    self._byweekday.append(wday)
                elif not wday.n or freq > MONTHLY:
                    self._byweekday.append(wday.weekday)
                else:
                    self._bynweekday.append((wday.weekday, wday.n))
            self._byweekday = tuple(self._byweekday)
            self._bynweekday = tuple(self._bynweekday)
            if not self._byweekday:
                self._byweekday = None
            elif not self._bynweekday:
                self._bynweekday = None
        # byhour: defaults to dtstart's hour for daily-or-longer periods.
        if byhour is None:
            if freq < HOURLY:
                self._byhour = (dtstart.hour,)
            else:
                self._byhour = None
        elif type(byhour) is int:
            self._byhour = (byhour,)
        else:
            self._byhour = tuple(byhour)
        # byminute
        if byminute is None:
            if freq < MINUTELY:
                self._byminute = (dtstart.minute,)
            else:
                self._byminute = None
        elif type(byminute) is int:
            self._byminute = (byminute,)
        else:
            self._byminute = tuple(byminute)
        # bysecond
        if bysecond is None:
            if freq < SECONDLY:
                self._bysecond = (dtstart.second,)
            else:
                self._bysecond = None
        elif type(bysecond) is int:
            self._bysecond = (bysecond,)
        else:
            self._bysecond = tuple(bysecond)
        # For daily-or-longer frequencies the set of times per day is fixed
        # and can be precomputed; otherwise it is rebuilt during iteration.
        if self._freq >= HOURLY:
            self._timeset = None
        else:
            self._timeset = []
            for hour in self._byhour:
                for minute in self._byminute:
                    for second in self._bysecond:
                        self._timeset.append(
                            datetime.time(hour, minute, second,
                                          tzinfo=self._tzinfo))
            self._timeset.sort()
            self._timeset = tuple(self._timeset)

    def _iter(self):
        """Generate the occurrences of this rule in ascending order."""
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()
        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond

        ii = _iterinfo(self)
        ii.rebuild(year, month)

        # Candidate-day generator for the current period, keyed by freq.
        getdayset = {YEARLY:ii.ydayset,
                     MONTHLY:ii.mdayset,
                     WEEKLY:ii.wdayset,
                     DAILY:ii.ddayset,
                     HOURLY:ii.ddayset,
                     MINUTELY:ii.ddayset,
                     SECONDLY:ii.ddayset}[freq]

        if freq < HOURLY:
            timeset = self._timeset
        else:
            gettimeset = {HOURLY:ii.htimeset,
                          MINUTELY:ii.mtimeset,
                          SECONDLY:ii.stimeset}[freq]
            # If dtstart's own time is filtered out, start with an empty
            # timeset; the frequency-advance loop below will find the
            # first matching time.
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                (freq >= MINUTELY and
                 self._byminute and minute not in self._byminute) or
                (freq >= SECONDLY and
                 self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)

        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)

            # Do the "hard" work ;-)
            # Mark every candidate day rejected by a by-* filter as None.
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                    (byweekno and not ii.wnomask[i]) or
                    (byweekday and ii.wdaymask[i] not in byweekday) or
                    (ii.nwdaymask and not ii.nwdaymask[i]) or
                    (byeaster and not ii.eastermask[i]) or
                    ((bymonthday or bynmonthday) and
                     ii.mdaymask[i] not in bymonthday and
                     ii.nmdaymask[i] not in bynmonthday) or
                    (byyearday and
                     ((i < ii.yearlen and i+1 not in byyearday
                       and -ii.yearlen+i not in byyearday) or
                      (i >= ii.yearlen and i+1-ii.yearlen not in byyearday
                       and -ii.nextyearlen+i-ii.yearlen
                       not in byyearday)))):
                    dayset[i] = None
                    filtered = True

            # Output results
            if bysetpos and timeset:
                # Pick only the requested positions out of the surviving
                # (day, time) combinations for this period.
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos-1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                             if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        total += 1
                        yield res
                        if count:
                            count -= 1
                            if not count:
                                self._len = total
                                return
            else:
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                total += 1
                                yield res
                                if count:
                                    count -= 1
                                    if not count:
                                        self._len = total
                                        return

            # Handle frequency and interval
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                # Jump to the next period's week-start day.
                if wkst > weekday:
                    day += -(weekday+1+(6-wkst))+self._interval*7
                else:
                    day += -(weekday-wkst)+self._interval*7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23-hour)//interval)*interval
                while True:
                    hour += interval
                    div, mod = divmod(hour, 24)
                    if div:
                        hour = mod
                        day += div
                        fixday = True
                    if not byhour or hour in byhour:
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439-(hour*60+minute))//interval)*interval
                while True:
                    minute += interval
                    div, mod = divmod(minute, 60)
                    if div:
                        minute = mod
                        hour += div
                        div, mod = divmod(hour, 24)
                        if div:
                            hour = mod
                            day += div
                            fixday = True
                            filtered = False
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute)):
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399-(hour*3600+minute*60+second))
                                //interval)*interval)
                while True:
                    second += self._interval
                    div, mod = divmod(second, 60)
                    if div:
                        second = mod
                        minute += div
                        div, mod = divmod(minute, 60)
                        if div:
                            minute = mod
                            hour += div
                            div, mod = divmod(hour, 24)
                            if div:
                                hour = mod
                                day += div
                                fixday = True
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute) and
                        (not bysecond or second in bysecond)):
                        break
                timeset = gettimeset(hour, minute, second)

            # Normalize an out-of-range day-of-month into real
            # year/month/day values, rebuilding the masks if we crossed a
            # month boundary.
            if fixday and day > 28:
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
class _iterinfo(object):
    """Per-year/per-month precomputed masks used by `rrule._iter`.

    `rebuild(year, month)` refreshes the day-of-year masks (month number,
    day-of-month, weekday, week-number, nth-weekday, easter) and the
    `*dayset`/`*timeset` helpers slice candidate days/times out of them.
    """
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]

    def __init__(self, rrule):
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule

    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()

            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE

            if not rr._byweekno:
                self.wnomask = None
            else:
                # Build a by-week-number mask following ISO-like rules:
                # week 1 is the first week with >= 4 days in this year.
                self.wnomask = [0]*(self.yearlen+7)
                #no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1,1,1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst)%7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen+
                                           (lyearweekday-rr._wkst)%7)%7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst)%7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1

        if (rr._bynweekday and
            (month != self.lastmonth or year != self.lastyear)):
            # Rebuild the "nth weekday of period" mask; only YEARLY and
            # MONTHLY frequencies can carry occurrence indices.
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday)%7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday)%7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1

        if rr._byeaster:
            # Mark the days at the requested offsets from Easter Sunday.
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1

        self.lastyear = year
        self.lastmonth = month

    def ydayset(self, year, month, day):
        """Candidate days for a YEARLY period: every day of the year."""
        return range(self.yearlen), 0, self.yearlen

    def mdayset(self, year, month, day):
        """Candidate days for a MONTHLY period: the days of `month`."""
        set = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            set[i] = i
        return set, start, end

    def wdayset(self, year, month, day):
        """Candidate days for a WEEKLY period starting at `day`."""
        # We need to handle cross-year weeks here.
        set = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            set[i] = i
            i += 1
            #if (not (0 <= i < self.yearlen) or
            #    self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return set, start, i

    def ddayset(self, year, month, day):
        """Candidate days for DAILY (and finer) periods: just `day`."""
        set = [None]*self.yearlen
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        set[i] = i
        return set, i, i+1

    def htimeset(self, hour, minute, second):
        """Times for an HOURLY period: `hour` crossed with by-minute/second."""
        set = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                set.append(datetime.time(hour, minute, second,
                                         tzinfo=rr._tzinfo))
        set.sort()
        return set

    def mtimeset(self, hour, minute, second):
        """Times for a MINUTELY period: `hour:minute` crossed with by-second."""
        set = []
        rr = self.rrule
        for second in rr._bysecond:
            set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        set.sort()
        return set

    def stimeset(self, hour, minute, second):
        """Times for a SECONDLY period: exactly one time."""
        return (datetime.time(hour, minute, second,
                              tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    """A set of recurrence rules and dates, with exclusion rules/dates.

    Iteration lazily merges all inclusion generators in ascending order,
    suppressing duplicates and any occurrence produced by an exclusion
    rule or listed as an exclusion date.
    """

    class _genitem:
        # Wraps a generator's `next` callable, caching the current value in
        # `self.dt` so items can be sort-merged by datetime. If the source
        # is already exhausted at construction time the item never adds
        # itself to `genlist`.
        def __init__(self, genlist, gen):
            try:
                self.dt = gen()
                genlist.append(self)
            except StopIteration:
                pass
            self.genlist = genlist
            self.gen = gen

        def next(self):
            # Advance to the next value; drop self from the merge list when
            # the underlying generator is exhausted.
            try:
                self.dt = self.gen()
            except StopIteration:
                self.genlist.remove(self)

        def __cmp__(self, other):
            # Python 2 rich-comparison fallback: order by current datetime.
            return cmp(self.dt, other.dt)

    def __init__(self, cache=False):
        rrulebase.__init__(self, cache)
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []

    def rrule(self, rrule):
        """Include the occurrences of the given recurrence rule."""
        self._rrule.append(rrule)

    def rdate(self, rdate):
        """Include a single explicit occurrence datetime."""
        self._rdate.append(rdate)

    def exrule(self, exrule):
        """Exclude every occurrence of the given recurrence rule."""
        self._exrule.append(exrule)

    def exdate(self, exdate):
        """Exclude a single explicit datetime."""
        self._exdate.append(exdate)

    def _iter(self):
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate).next)
        for gen in [iter(x).next for x in self._rrule]:
            self._genitem(rlist, gen)
        rlist.sort()
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate).next)
        for gen in [iter(x).next for x in self._exrule]:
            self._genitem(exlist, gen)
        exlist.sort()
        lastdt = None
        total = 0
        while rlist:
            ritem = rlist[0]
            if not lastdt or lastdt != ritem.dt:
                # Advance exclusions up to the candidate, then emit it only
                # if no exclusion matches; `lastdt` suppresses duplicates.
                while exlist and exlist[0] < ritem:
                    exlist[0].next()
                    exlist.sort()
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            ritem.next()
            rlist.sort()
        self._len = total
class _rrulestr:
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError, "invalid until date"
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg):
l = []
for wday in value.split(','):
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n: n = int(n)
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError, "unknown parameter name"
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError, "unknown parameter '%s'" % name
except (KeyError, ValueError):
raise ValueError, "invalid '%s': %s" % (name, value)
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
s = s.upper()
if not s.strip():
raise ValueError, "empty string"
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and
(s.find(':') == -1 or s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError, "unsupported RRULE parm: "+parm
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError, "unsupported EXRULE parm: "+parm
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError, "unsupported DTSTART parm: "+parm
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError, "unsupported property: "+name
if (forceset or len(rrulevals) > 1 or
rdatevals or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
set = rruleset(cache=cache)
for value in rrulevals:
set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
set.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
set.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
set.rdate(dtstart)
return set
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
    def __call__(self, s, **kwargs):
        """Parse the RFC string *s*; thin delegate to :meth:`_parse_rfc`."""
        return self._parse_rfc(s, **kwargs)
# Module-level callable instance: ``rrulestr("RRULE:...")`` parses an
# RFC 2445 recurrence string into an rrule/rruleset.
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
| mpl-2.0 |
jhd/spunout | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Multi-byte charset prober specialised for EUC-KR (Korean)."""

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # The distribution analyzer scores Korean character frequency;
        # the coding state machine validates EUC-KR byte sequences.
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self.reset()

    def get_charset_name(self):
        """Return the canonical name of the charset this prober detects."""
        return "EUC-KR"
| gpl-3.0 |
watonyweng/horizon | openstack_dashboard/dashboards/settings/user/forms.py | 42 | 5347 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime # noqa
import string
import babel
import babel.dates
from django.conf import settings
from django import shortcuts
from django.utils import encoding
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
import pytz
from horizon import forms
from horizon import messages
def _one_year():
now = datetime.utcnow()
return datetime(now.year + 1, now.month, now.day, now.hour,
now.minute, now.second, now.microsecond, now.tzinfo)
class UserSettingsForm(forms.SelfHandlingForm):
    """Per-user UI settings: language, timezone, page size and log length.

    Submitted values are written both to the Django session (takes effect
    immediately) and to cookies (survive logout) in :meth:`handle`.
    """
    language = forms.ChoiceField(label=_("Language"))
    timezone = forms.ChoiceField(label=_("Timezone"))
    pagesize = forms.IntegerField(label=_("Items Per Page"),
                                  min_value=1,
                                  max_value=getattr(settings,
                                                    'API_RESULT_LIMIT',
                                                    1000),
                                  help_text=_("Number of items to show per "
                                              "page (applies to the pages "
                                              "that have API supported "
                                              "pagination)"))
    instance_log_length = forms.IntegerField(
        label=_("Log Lines Per Instance"), min_value=1,
        help_text=_("Number of log lines to be shown per instance"))
    @staticmethod
    def _sorted_zones():
        # Sort pytz's common timezones by their numeric UTC offset as of
        # Jan 1 of the current year ('%z' renders e.g. '+0100').
        d = datetime(datetime.today().year, 1, 1)
        zones = [(tz, pytz.timezone(tz).localize(d).strftime('%z'))
                 for tz in pytz.common_timezones]
        zones.sort(key=lambda zone: int(zone[1]))
        return zones
    def __init__(self, *args, **kwargs):
        """Populate the language and timezone choice lists."""
        super(UserSettingsForm, self).__init__(*args, **kwargs)
        # Languages
        def get_language_display_name(code, desc):
            # Prefer the language's native name, e.g. "Deutsch (de)".
            try:
                desc = translation.get_language_info(code)['name_local']
                desc = string.capwords(desc)
            except KeyError:
                # If a language is not defined in django.conf.locale.LANG_INFO
                # get_language_info raises KeyError
                pass
            return "%s (%s)" % (desc, code)
        languages = [(k, get_language_display_name(k, v))
                     for k, v in settings.LANGUAGES]
        self.fields['language'].choices = languages
        # Timezones
        timezones = []
        language = translation.get_language()
        current_locale = translation.to_locale(language)
        babel_locale = babel.Locale.parse(current_locale)
        for tz, offset in self._sorted_zones():
            try:
                utc_offset = _("UTC %(hour)s:%(min)s") % {"hour": offset[:3],
                                                          "min": offset[3:]}
            except Exception:
                utc_offset = ""
            if tz == "UTC":
                tz_name = _("UTC")
            elif tz == "GMT":
                tz_name = _("GMT")
            else:
                # Human-readable location label localized via babel,
                # e.g. "+01:00: Central European Time (Paris)".
                tz_label = babel.dates.get_timezone_location(
                    tz, locale=babel_locale)
                # Translators: UTC offset and timezone label
                tz_name = _("%(offset)s: %(label)s") % {"offset": utc_offset,
                                                        "label": tz_label}
            timezones.append((tz, tz_name))
        self.fields['timezone'].choices = timezones
    def handle(self, request, data):
        """Persist the submitted settings to session and cookies.

        Returns a redirect back to the current URL so the new settings
        (notably the language) are applied on the next render.
        """
        response = shortcuts.redirect(request.build_absolute_uri())
        # Language
        lang_code = data['language']
        if lang_code and translation.check_for_language(lang_code):
            if hasattr(request, 'session'):
                request.session['django_language'] = lang_code
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
                                expires=_one_year())
        # Timezone
        request.session['django_timezone'] = pytz.timezone(
            data['timezone']).zone
        response.set_cookie('django_timezone', data['timezone'],
                            expires=_one_year())
        request.session['horizon_pagesize'] = data['pagesize']
        response.set_cookie('horizon_pagesize', data['pagesize'],
                            expires=_one_year())
        request.session['instance_log_length'] = data['instance_log_length']
        response.set_cookie('instance_log_length',
                            data['instance_log_length'], expires=_one_year())
        with translation.override(lang_code):
            messages.success(request,
                             encoding.force_text(_("Settings saved.")))
        return response
| apache-2.0 |
jalexvig/tensorflow | tensorflow/python/kernel_tests/map_stage_op_test.py | 28 | 21057 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class MapStageTest(test.TestCase):
  def testSimple(self):
    """Staging one tensor per key and retrieving it by key round-trips."""
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)  # key used for put()
        gi = array_ops.placeholder(dtypes.int64)  # key used for get()
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea([dtypes.float32])
        stage = stager.put(pi, [v], [0])
        k, y = stager.get(gi)
        y = math_ops.reduce_max(math_ops.matmul(y, y))
    G.finalize()
    with self.test_session(use_gpu=True, graph=G) as sess:
      sess.run(stage, feed_dict={x: -1, pi: 0})
      for i in range(10):
        # Each step stages key i+1 and pops key i (staged one step earlier),
        # so the value read back corresponds to x == i - 1.
        _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
        self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testMultiple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32])
stage = stager.put(pi, [x, v], [0, 1])
k, (z, y) = stager.get(gi)
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testDictionary(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put(pi, {'x': x, 'v': v})
key, ret = stager.get(gi)
z = ret['x']
y = ret['v']
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testColocation(self):
gpu_dev = test.gpu_device_name()
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(gpu_dev):
stager = data_flow_ops.MapStagingArea([dtypes.float32])
y = stager.put(1, [v], [0])
expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
self.assertEqual(y.device, expected_name)
with ops.device('/cpu:0'):
_, x = stager.get(1)
y = stager.peek(1)[0]
_, z = stager.get()
self.assertEqual(x[0].device, '/device:CPU:0')
self.assertEqual(y.device, '/device:CPU:0')
self.assertEqual(z[0].device, '/device:CPU:0')
G.finalize()
  def testPeek(self):
    """peek() returns staged values without removing them from the area."""
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.int32, name='x')
        pi = array_ops.placeholder(dtypes.int64)
        gi = array_ops.placeholder(dtypes.int64)
        p = array_ops.placeholder(dtypes.int32, name='p')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea(
            [
                dtypes.int32,
            ], shapes=[[]])
        stage = stager.put(pi, [x], [0])
        peek = stager.peek(gi)
        size = stager.size()
    G.finalize()
    n = 10
    with self.test_session(use_gpu=True, graph=G) as sess:
      for i in range(n):
        sess.run(stage, feed_dict={x: i, pi: i})
      for i in range(n):
        self.assertTrue(sess.run(peek, feed_dict={gi: i})[0] == i)
      # Unlike get(), peek() must leave all n entries in place.
      self.assertTrue(sess.run(size) == 10)
  def testSizeAndClear(self):
    """size() counts complete entries; clear() removes them all."""
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32, name='x')
        pi = array_ops.placeholder(dtypes.int64)
        gi = array_ops.placeholder(dtypes.int64)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea(
            [dtypes.float32, dtypes.float32],
            shapes=[[], [128, 128]],
            names=['x', 'v'])
        stage = stager.put(pi, {'x': x, 'v': v})
        size = stager.size()
        clear = stager.clear()
    G.finalize()
    with self.test_session(use_gpu=True, graph=G) as sess:
      sess.run(stage, feed_dict={x: -1, pi: 3})
      self.assertEqual(sess.run(size), 1)
      sess.run(stage, feed_dict={x: -1, pi: 1})
      self.assertEqual(sess.run(size), 2)
      sess.run(clear)
      self.assertEqual(sess.run(size), 0)
def testCapacity(self):
capacity = 3
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[
dtypes.int32,
], capacity=capacity, shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.test_session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertTrue(sess.run(size) == 0)
def testMemoryLimit(self):
memory_limit = 512 * 1024 # 512K
chunk = 200 * 1024 # 256K
capacity = memory_limit // chunk
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.uint8, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.uint8], memory_limit=memory_limit, shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
import numpy as np
queue = Queue.Queue()
n = 8
with self.test_session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
data = np.full(chunk, i, dtype=np.uint8)
sess.run(stage, feed_dict={x: data, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertTrue(sess.run(size) == 0)
def testOrdering(self):
import six
import random
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[
dtypes.int32,
], shapes=[[]], ordered=True)
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
n = 10
with self.test_session(use_gpu=True, graph=G) as sess:
# Keys n-1..0
keys = list(reversed(six.moves.range(n)))
for i in keys:
sess.run(stage, feed_dict={pi: i, x: i})
self.assertTrue(sess.run(size) == n)
# Check that key, values come out in ascending order
for i, k in enumerate(reversed(keys)):
get_key, values = sess.run(get)
self.assertTrue(i == k == get_key == values)
self.assertTrue(sess.run(size) == 0)
def testPartialDictInsert(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test barrier with dictionary
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32],
names=['x', 'v', 'f'])
stage_xf = stager.put(pi, {'x': x, 'f': f})
stage_v = stager.put(pi, {'v': v})
key, ret = stager.get(gi)
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
# 0 complete and incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now obtain tuple associated with key 0
self.assertTrue(
sess.run([key, ret], feed_dict={
gi: 0
}) == [0, {
'x': 1,
'f': 2,
'v': 1
}])
# 0 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 3})
# We can now obtain tuple associated with key 1
self.assertTrue(
sess.run([key, ret], feed_dict={
gi: 1
}) == [1, {
'x': 1,
'f': 2,
'v': 3
}])
def testPartialIndexInsert(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32])
stage_xf = stager.put(pi, [x, f], [0, 2])
stage_v = stager.put(pi, [v], [1])
key, ret = stager.get(gi)
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
# 0 complete and incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now obtain tuple associated with key 0
self.assertTrue(sess.run([key, ret], feed_dict={gi: 0}) == [0, [1, 1, 2]])
# 0 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 3})
# We can now obtain tuple associated with key 1
self.assertTrue(sess.run([key, ret], feed_dict={gi: 1}) == [1, [1, 3, 2]])
def testPartialDictGetsAndPeeks(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
pei = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test barrier with dictionary
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32],
names=['x', 'v', 'f'])
stage_xf = stager.put(pi, {'x': x, 'f': f})
stage_v = stager.put(pi, {'v': v})
peek_xf = stager.peek(pei, ['x', 'f'])
peek_v = stager.peek(pei, ['v'])
key_xf, get_xf = stager.get(gi, ['x', 'f'])
key_v, get_v = stager.get(gi, ['v'])
pop_key_xf, pop_xf = stager.get(indices=['x', 'f'])
pop_key_v, pop_v = stager.get(pi, ['v'])
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
# 0 complete and incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertTrue(sess.run([size, isize]) == [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now peek at 'x' and 'f' values associated with key 0
self.assertTrue(sess.run(peek_xf, feed_dict={pei: 0}) == {'x': 1, 'f': 2})
# Peek at 'v' value associated with key 0
self.assertTrue(sess.run(peek_v, feed_dict={pei: 0}) == {'v': 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can now obtain 'x' and 'f' values associated with key 0
self.assertTrue(
sess.run([key_xf, get_xf], feed_dict={
gi: 0
}) == [0, {
'x': 1,
'f': 2
}])
# Still have 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 1])
# We can no longer get 'x' and 'f' from key 0
with self.assertRaises(errors.InvalidArgumentError) as cm:
sess.run([key_xf, get_xf], feed_dict={gi: 0})
exc_str = ("Tensor at index '0' for key '0' " 'has already been removed.')
self.assertTrue(exc_str in cm.exception.message)
# Obtain 'v' value associated with key 0
self.assertTrue(
sess.run([key_v, get_v], feed_dict={
gi: 0
}) == [0, {
'v': 1
}])
# 0 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 1})
# 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 0])
# Pop without key to obtain 'x' and 'f' values associated with key 1
self.assertTrue(sess.run([pop_key_xf, pop_xf]) == [1, {'x': 1, 'f': 2}])
# still 1 complete and 1 incomplete entry
self.assertTrue(sess.run([size, isize]) == [1, 0])
# We can now obtain 'x' and 'f' values associated with key 1
self.assertTrue(
sess.run([pop_key_v, pop_v], feed_dict={
pi: 1
}) == [1, {
'v': 1
}])
# Nothing is left
self.assertTrue(sess.run([size, isize]) == [0, 0])
def testPartialIndexGets(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
pei = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test again with partial index gets
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32])
stage_xvf = stager.put(pi, [x, v, f], [0, 1, 2])
key_xf, get_xf = stager.get(gi, [0, 2])
key_v, get_v = stager.get(gi, [1])
size = stager.size()
isize = stager.incomplete_size()
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
# Stage complete tuple
sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3})
self.assertTrue(sess.run([size, isize]) == [1, 0])
# Partial get using indices
self.assertTrue(
sess.run([key_xf, get_xf], feed_dict={
gi: 0
}) == [0, [1, 2]])
# Still some of key 0 left
self.assertTrue(sess.run([size, isize]) == [1, 0])
# Partial get of remaining index
self.assertTrue(sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, [3]])
# All gone
self.assertTrue(sess.run([size, isize]) == [0, 0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
stkubr/zipline | tests/test_events_through_risk.py | 7 | 11411 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datetime
import pytz
import numpy as np
from zipline.finance.trading import SimulationParameters
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
class BuyAndHoldAlgorithm(TradingAlgorithm):
    """Order 100 shares of one sid on the first bar, then hold forever."""

    SID_TO_BUY_AND_HOLD = 1

    def initialize(self):
        # Tracks whether the one-time buy order has been placed.
        self.holding = False

    def handle_data(self, data):
        # Guard clause: once the position exists there is nothing to do.
        if self.holding:
            return
        self.order(self.SID_TO_BUY_AND_HOLD, 100)
        self.holding = True
class TestEventsThroughRisk(unittest.TestCase):
def test_daily_buy_and_hold(self):
start_date = datetime.datetime(
year=2006,
month=1,
day=3,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_date = datetime.datetime(
year=2006,
month=1,
day=5,
hour=0,
minute=0,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_date,
period_end=end_date,
data_frequency='daily',
emission_rate='daily'
)
algo = BuyAndHoldAlgorithm(
sim_params=sim_params)
first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
trade_bar_data = [
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_date,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_date,
'source_id': 'test_list',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_date,
'source_id': 'test_list',
'type': DATASOURCE_TYPE.TRADE
}),
]
benchmark_data = [
Event({
'returns': 0.1,
'dt': first_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.2,
'dt': second_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.4,
'dt': third_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
]
algo.benchmark_return_source = benchmark_data
algo.set_sources(list([trade_bar_data]))
gen = algo._create_generator(sim_params)
# TODO: Hand derive these results.
# Currently, the output from the time of this writing to
# at least be an early warning against changes.
expected_algorithm_returns = {
first_date: 0.0,
second_date: -0.000350,
third_date: -0.050018
}
# TODO: Hand derive these results.
# Currently, the output from the time of this writing to
# at least be an early warning against changes.
expected_sharpe = {
first_date: np.nan,
second_date: -22.322677,
third_date: -9.353741
}
for bar in gen:
current_dt = algo.datetime
crm = algo.perf_tracker.cumulative_risk_metrics
np.testing.assert_almost_equal(
crm.algorithm_returns[current_dt],
expected_algorithm_returns[current_dt],
decimal=6)
np.testing.assert_almost_equal(
crm.metrics.sharpe[current_dt],
expected_sharpe[current_dt],
decimal=6,
err_msg="Mismatch at %s" % (current_dt,))
def test_minute_buy_and_hold(self):
with trading.TradingEnvironment():
start_date = datetime.datetime(
year=2006,
month=1,
day=3,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_date = datetime.datetime(
year=2006,
month=1,
day=5,
hour=0,
minute=0,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_date,
period_end=end_date,
emission_rate='daily',
data_frequency='minute')
algo = BuyAndHoldAlgorithm(
sim_params=sim_params)
first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
first_open, first_close = \
trading.environment.get_open_and_close(first_date)
second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
second_open, second_close = \
trading.environment.get_open_and_close(second_date)
third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
third_open, third_close = \
trading.environment.get_open_and_close(third_date)
benchmark_data = [
Event({
'returns': 0.1,
'dt': first_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.2,
'dt': second_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.4,
'dt': third_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
]
trade_bar_data = [
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
]
algo.benchmark_return_source = benchmark_data
algo.set_sources(list([trade_bar_data]))
gen = algo._create_generator(sim_params)
crm = algo.perf_tracker.cumulative_risk_metrics
first_msg = next(gen)
self.assertIsNotNone(first_msg,
"There should be a message emitted.")
# Protects against bug where the positions appeared to be
# a day late, because benchmarks were triggering
# calculations before the events for the day were
# processed.
self.assertEqual(1, len(algo.portfolio.positions), "There should "
"be one position after the first day.")
self.assertEquals(
0,
crm.metrics.algorithm_volatility[algo.datetime.date()],
"On the first day algorithm volatility does not exist.")
second_msg = next(gen)
self.assertIsNotNone(second_msg, "There should be a message "
"emitted.")
self.assertEqual(1, len(algo.portfolio.positions),
"Number of positions should stay the same.")
# TODO: Hand derive. Current value is just a canary to
# detect changes.
np.testing.assert_almost_equal(
0.050022510129558301,
crm.algorithm_returns[-1],
decimal=6)
third_msg = next(gen)
self.assertEqual(1, len(algo.portfolio.positions),
"Number of positions should stay the same.")
self.assertIsNotNone(third_msg, "There should be a message "
"emitted.")
# TODO: Hand derive. Current value is just a canary to
# detect changes.
np.testing.assert_almost_equal(
-0.047639464532418657,
crm.algorithm_returns[-1],
decimal=6)
| apache-2.0 |
Antiun/c2c-rd-addons | c2c_budget_chricar/c2c_budget.py | 4 | 7703 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) Camptocamp SA - http://www.camptocamp.com
#    Author: Arnaud Wüst, ported by Nicolas Bessi
#
# This file is part of the c2c_budget module
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from openerp.osv import fields, osv
import time
#import pooler
class c2c_budget(osv.osv):
    """Camptocamp budget.  The module's main object.

    A budget covers a date range, is attached to a budget structure
    (``c2c_budget.item``) and owns any number of ``c2c_budget.version``
    records holding the actual figures.
    """
    _name = "c2c_budget"
    _description = "Budget"
    _columns = {
        'code' : fields.char('Code', size=50),
        'name' : fields.char('Name', size=200, required=True),
        'active' : fields.boolean('Active'),
        'start_date' : fields.date('Start Date', required=True),
        'end_date' : fields.date('End Date', required=True),
        'budget_item_id' : fields.many2one(
            'c2c_budget.item',
            'Budget Structure',
            required=True
        ),
        'budget_version_ids' : fields.one2many(
            'c2c_budget.version',
            'budget_id',
            'Budget Versions',
            readonly=True
        ),
        'note' : fields.text('Notes'),
        'create_date' : fields.datetime('Creation Date', readonly=True)
    }
    _defaults = {
        'active' : lambda *a : True,
    }
    _order = 'name'

    def name_search(self, cr, user, name, args=None,
                    operator='ilike', context=None, limit=80):
        """Search not only for matching names but also for matching codes."""
        if args is None:
            args = []
        if context is None:
            context = {}
        ids = self.search(cr, user, [('code', operator, name)] + args,
                          limit=limit, context=context)
        ids += self.search(cr, user, [('name', operator, name)] + args,
                           limit=limit, context=context)
        return self.name_get(cr, user, ids, context)

    def _check_start_end_dates(self, cr, uid, ids):
        """Constraint check: the start date must not be after the end date."""
        for budget in self.browse(cr, uid, ids):
            if budget.end_date < budget.start_date:
                return False
        return True

    def get_periods(self, cr, uid, ids, context=None):
        """Return each budget's account periods ordered by date_start.

        For a single integer id a single list of periods is returned;
        for a list of ids, one list per budget.
        """
        if context is None:
            context = {}
        # bug fix: use the registry via self.pool -- the old ``pooler``
        # import was removed, so pooler.get_pool() raised NameError.
        period_obj = self.pool.get('account.period')
        result = []
        single = isinstance(ids, (int, long))
        # bug fix: the original never assigned budget_ids when a list of
        # ids was given, raising NameError on the browse() below.
        budget_ids = [ids] if single else ids
        for budget in self.browse(cr, uid, budget_ids, context):
            periods_ids = period_obj.search(
                cr,
                uid,
                [
                    ('date_stop', '>', budget.start_date),
                    ('date_start', '<', budget.end_date)
                ],
                order="date_start ASC"
            )
            result.append(period_obj.browse(cr, uid, periods_ids, context))
        if single:
            result = result[0]
        return result

    def get_periods_union(self, cr, uid, ids, context=None):
        """Return one list of periods, ordered by date_start, covering
        all the given budgets (earliest start to latest end).
        """
        if context is None:
            context = {}
        period_obj = self.pool.get('account.period')
        result = []
        single = isinstance(ids, (int, long))
        budget_ids = [ids] if single else ids
        budgets = self.browse(cr, uid, budget_ids, context)
        # find the earliest start_date and the latest end_date
        start_date = None
        end_date = None
        for budget in budgets:
            if start_date is None or start_date > budget.start_date:
                start_date = budget.start_date
            if end_date is None or end_date < budget.end_date:
                end_date = budget.end_date
        if start_date is not None:
            periods_ids = period_obj.search(
                cr,
                uid,
                [
                    ('date_stop', '>', start_date),
                    ('date_start', '<', end_date)
                ],
                order="date_start ASC"
            )
            result = period_obj.browse(cr, uid, periods_ids, context)
        if single:
            return result[0]
        return result

    def unlink(self, cr, uid, ids, context=None):
        """Delete all budget versions when deleting a budget."""
        if context is None:
            context = {}
        version_obj = self.pool.get('c2c_budget.version')
        version_ids = version_obj.search(
            cr,
            uid,
            [('budget_id', 'in', ids)],
            context=context
        )
        version_obj.unlink(cr, uid, version_ids, context)
        return super(c2c_budget, self).unlink(cr, uid, ids, context)

    _constraints = [
        (_check_start_end_dates,
         'Date Error: The end date is defined before the start date',
         ['start_date', 'end_date']),
    ]
c2c_budget()
| agpl-3.0 |
cce/kombu | kombu/common.py | 3 | 12037 | """
kombu.common
============
Common Utilities.
"""
from __future__ import absolute_import
import os
import socket
import threading
from collections import deque
from contextlib import contextmanager
from functools import partial
from itertools import count
from uuid import uuid4, uuid3, NAMESPACE_OID
from amqp import RecoverableConnectionError
from .entity import Exchange, Queue
from .five import range
from .log import get_logger
from .serialization import registry as serializers
from .utils import uuid
try:
from _thread import get_ident
except ImportError: # pragma: no cover
try: # noqa
from thread import get_ident # noqa
except ImportError: # pragma: no cover
from dummy_thread import get_ident # noqa
__all__ = ['Broadcast', 'maybe_declare', 'uuid',
'itermessages', 'send_reply',
'collect_replies', 'insured', 'drain_consumer',
'eventloop']
#: Prefetch count can't exceed short.
PREFETCH_COUNT_MAX = 0xFFFF
logger = get_logger(__name__)
_node_id = None
def get_node_id():
    """Return this process's random node id, creating it on first use."""
    global _node_id
    node = _node_id
    if node is None:
        node = _node_id = uuid4().int
    return node
def generate_oid(node_id, process_id, thread_id, instance):
    """Return a UUIDv3 oid string derived from node, process, thread
    and object identity."""
    identity = '%x-%x-%x-%x' % (node_id, process_id, thread_id, id(instance))
    return str(uuid3(NAMESPACE_OID, identity))
def oid_from(instance):
    """Return the oid for *instance* in the current node/process/thread."""
    node = get_node_id()
    pid = os.getpid()
    tid = get_ident()
    return generate_oid(node, pid, tid, instance)
class Broadcast(Queue):
    """Convenience class used to define broadcast queues.

    Every queue instance will have a unique name, and both the queue
    and exchange are configured with auto deletion.

    :keyword name: This is used as the name of the exchange.
    :keyword queue: By default a unique id is used for the queue
        name for every consumer.  You can specify a custom queue
        name here.
    :keyword \*\*kwargs: See :class:`~kombu.Queue` for a list
        of additional keyword arguments supported.

    """

    def __init__(self, name=None, queue=None, **kwargs):
        # unique per-consumer queue name unless one is given explicitly.
        queue_name = queue or 'bcast.%s' % (uuid(), )
        options = {'alias': name,
                   'auto_delete': True,
                   'exchange': Exchange(name, type='fanout')}
        options.update(kwargs)
        return super(Broadcast, self).__init__(name=queue_name, **options)
def declaration_cached(entity, channel):
    """Return True if *entity* is in the connection's declaration cache."""
    cache = channel.connection.client.declared_entities
    return entity in cache
def maybe_declare(entity, channel=None, retry=False, **retry_policy):
    """Declare *entity* on *channel* unless it was already declared.

    Declarations of cacheable entities are remembered on the
    connection's ``declared_entities`` set, so each such entity is
    declared at most once per connection.

    Returns True if a declaration was performed, False when it was
    skipped because of the cache.
    """
    is_bound = entity.is_bound
    if not is_bound:
        # an unbound entity cannot know its channel, so one is required.
        assert channel
        entity = entity.bind(channel)
    if channel is None:
        # no channel given: take it from the (bound) entity.
        assert is_bound
        channel = entity.channel
    declared = ident = None
    if channel.connection and entity.can_cache_declaration:
        declared = channel.connection.client.declared_entities
        ident = hash(entity)
        if ident in declared:
            return False
    if retry:
        return _imaybe_declare(entity, declared, ident,
                               channel, **retry_policy)
    return _maybe_declare(entity, declared, ident, channel)
def _maybe_declare(entity, declared, ident, channel):
channel = channel or entity.channel
if not channel.connection:
raise RecoverableConnectionError('channel disconnected')
entity.declare()
if declared is not None and ident:
declared.add(ident)
return True
def _imaybe_declare(entity, declared, ident, channel, **retry_policy):
    """Declare with retries via the connection's ensure() wrapper."""
    client = entity.channel.connection.client
    declare = client.ensure(entity, _maybe_declare, **retry_policy)
    return declare(entity, declared, ident, channel)
def drain_consumer(consumer, limit=1, timeout=None, callbacks=None):
    """Drain events and yield the (body, message) pairs received.

    The consumer is active for the duration of the generator, and any
    extra *callbacks* are invoked in addition to the collecting
    callback installed here.
    """
    received = deque()

    def collect(body, message):
        received.append((body, message))

    consumer.callbacks = [collect] + (callbacks or [])
    with consumer:
        for _ in eventloop(consumer.channel.connection.client,
                           limit=limit, timeout=timeout,
                           ignore_timeouts=True):
            # a drain may complete without delivering to this consumer.
            if received:
                yield received.popleft()
def itermessages(conn, channel, queue, limit=1, timeout=None,
                 callbacks=None, **kwargs):
    """Iterate over (body, message) pairs received on *queue*."""
    consumer = conn.Consumer(queues=[queue], channel=channel, **kwargs)
    return drain_consumer(consumer,
                          limit=limit, timeout=timeout, callbacks=callbacks)
def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False):
    """Best practice generator wrapper around ``Connection.drain_events``.

    Able to drain events forever, with a limit, and optionally ignoring
    timeout errors (a timeout of 1 is often used in environments where
    the socket can get "stuck", and is a best practice for Kombu consumers).

    **Examples**

    ``eventloop`` is a generator::

        from kombu.common import eventloop

        def run(connection):
            it = eventloop(connection, timeout=1, ignore_timeouts=True)
            next(it)   # one event consumed, or timed out.

            for _ in eventloop(connection, timeout=1, ignore_timeouts=True):
                pass  # loop forever.

    It also takes an optional limit parameter, and timeout errors
    are propagated by default::

        for _ in eventloop(connection, limit=1, timeout=1):
            pass

    .. seealso::

        :func:`itermessages`, which is an event loop bound to one or more
        consumers, that yields any messages received.
    """
    # idiom fix: conditional expression instead of the fragile
    # ``limit and range(limit) or count()`` and/or trick.  Iterate
    # *limit* times, or forever when limit is None/0.
    for _ in range(limit) if limit else count():
        try:
            yield conn.drain_events(timeout=timeout)
        except socket.timeout:
            # only propagate when a timeout was actually requested and
            # the caller did not opt out of timeout errors.
            if timeout and not ignore_timeouts:  # pragma: no cover
                raise
def send_reply(exchange, req, msg,
               producer=None, retry=False, retry_policy=None, **props):
    """Send reply for request.

    :param exchange: Reply exchange
    :param req: Original request, a message with a ``reply_to`` property.
    :param producer: Producer instance
    :param retry: If true must retry according to ``reply_policy`` argument.
    :param retry_policy: Retry settings.
    :param props: Extra properties

    .. note:: although *producer* defaults to None it is effectively
        required: publishing is done through it unconditionally, so
        omitting it raises AttributeError.
    """
    # routing key, correlation id, serializer and encoding are derived
    # from the request message; explicit **props override these defaults.
    producer.publish(
        msg, exchange=exchange,
        retry=retry, retry_policy=retry_policy,
        **dict({'routing_key': req.properties['reply_to'],
                'correlation_id': req.properties.get('correlation_id'),
                'serializer': serializers.type_to_name[req.content_type],
                'content_encoding': req.content_encoding}, **props)
    )
def collect_replies(conn, channel, queue, *args, **kwargs):
    """Generator collecting replies from ``queue``.

    By default messages are consumed with ``no_ack=True``; when the
    caller disables that, each message is acked manually here.
    """
    no_ack = kwargs.setdefault('no_ack', True)
    received = False
    try:
        for body, message in itermessages(conn, channel, queue,
                                          *args, **kwargs):
            if not no_ack:
                message.ack()
            received = True
            yield body
    finally:
        # only notify the channel if at least one reply arrived.
        if received:
            channel.after_reply_message_received(queue.name)
def _ensure_errback(exc, interval):
    """Default errback: log the connection error and the retry delay."""
    logger.error(
        'Connection error: %r. Retry in %ss\n', exc, interval,
        exc_info=True,
    )
@contextmanager
def _ignore_errors(conn):
try:
yield
except conn.connection_errors + conn.channel_errors:
pass
def ignore_errors(conn, fun=None, *args, **kwargs):
    """Ignore connection and channel errors.

    The first argument must be a connection object, or any other object
    with ``connection_errors`` and ``channel_errors`` attributes.

    Can be used as a function:

    .. code-block:: python

        def example(connection):
            ignore_errors(connection, consumer.channel.close)

    or as a context manager:

    .. code-block:: python

        def example(connection):
            with ignore_errors(connection):
                consumer.channel.close()

    .. note::

        Connection and channel errors should be properly handled,
        and not ignored.  Using this function is only acceptable in a
        cleanup phase, like when a connection is lost or at shutdown.

    """
    if fun:
        with _ignore_errors(conn):
            return fun(*args, **kwargs)
    return _ignore_errors(conn)
def revive_connection(connection, channel, on_revive=None):
    """Invoke the *on_revive* callback with the new channel, if given."""
    if not on_revive:
        return
    on_revive(channel)
def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts):
    """Ensures function performing broker commands completes
    despite intermittent connection failures.

    A connection is acquired from *pool*, established with retries, and
    *fun* is invoked through the connection's autoretry wrapper so it is
    re-run on recoverable failures.  The connection is injected into the
    call as a ``connection`` keyword argument.
    """
    errback = errback or _ensure_errback
    with pool.acquire(block=True) as conn:
        conn.ensure_connection(errback=errback)
        # we cache the channel for subsequent calls, this has to be
        # reset on revival.
        channel = conn.default_channel
        revive = partial(revive_connection, conn, on_revive=on_revive)
        insured = conn.autoretry(fun, channel, errback=errback,
                                 on_revive=revive, **opts)
        # autoretry returns (result, channel); only the result is needed.
        retval, _ = insured(*args, **dict(kwargs, connection=conn))
        return retval
class QoS(object):
    """Thread safe increment/decrement of a channels prefetch_count.

    :param callback: Function used to set new prefetch count,
        e.g. ``consumer.qos`` or ``channel.basic_qos``.  Will be called
        with a single ``prefetch_count`` keyword argument.
    :param initial_value: Initial prefetch count value.

    **Example usage**

    .. code-block:: python

        >>> from kombu import Consumer, Connection
        >>> connection = Connection('amqp://')
        >>> consumer = Consumer(connection)
        >>> qos = QoS(consumer.qos, initial_prefetch_count=2)
        >>> qos.update()  # set initial

        >>> qos.value
        2

        >>> def in_some_thread():
        ...     qos.increment_eventually()

        >>> def in_some_other_thread():
        ...     qos.decrement_eventually()

        >>> while 1:
        ...    if qos.prev != qos.value:
        ...        qos.update()  # prefetch changed so update.

    It can be used with any function supporting a ``prefetch_count`` keyword
    argument::

        >>> channel = connection.channel()
        >>> QoS(channel.basic_qos, 10)

        >>> def set_qos(prefetch_count):
        ...     print('prefetch count now: %r' % (prefetch_count, ))
        >>> QoS(set_qos, 10)

    """
    #: Last prefetch count sent to the broker (None until first set()).
    prev = None

    def __init__(self, callback, initial_value):
        self.callback = callback
        self._mutex = threading.RLock()
        self.value = initial_value or 0

    def increment_eventually(self, n=1):
        """Increment the value, but do not update the channels QoS.

        The MainThread will be responsible for calling :meth:`update`
        when necessary.
        """
        with self._mutex:
            if self.value:
                # negative increments are clamped to 0; a current value
                # of 0 means QoS is disabled and stays disabled.
                self.value = self.value + max(n, 0)
            return self.value

    def decrement_eventually(self, n=1):
        """Decrement the value, but do not update the channels QoS.

        The MainThread will be responsible for calling :meth:`update`
        when necessary.
        """
        with self._mutex:
            if self.value:
                self.value -= n
                if self.value < 1:
                    # never drop below 1: a count of 0 means "unlimited".
                    self.value = 1
            return self.value

    def set(self, pcount):
        """Set channel prefetch_count setting."""
        if pcount != self.prev:
            new_value = pcount
            if pcount > PREFETCH_COUNT_MAX:
                # AMQP prefetch counts are unsigned shorts; fall back to
                # 0 (disabled) when the requested value cannot be sent.
                # bug fix: logger.warn is a deprecated alias of warning.
                logger.warning('QoS: Disabled: prefetch_count exceeds %r',
                               PREFETCH_COUNT_MAX)
                new_value = 0
            logger.debug('basic.qos: prefetch_count->%s', new_value)
            self.callback(prefetch_count=new_value)
            self.prev = pcount
        return pcount

    def update(self):
        """Update prefetch count with current value."""
        with self._mutex:
            return self.set(self.value)
| bsd-3-clause |
mwrightevent38/MissionPlanner | Lib/site-packages/numpy/lib/tests/test_function_base.py | 53 | 38511 | from numpy.testing import *
import numpy.lib
from numpy.lib import *
from numpy.core import *
from numpy import matrix, asmatrix
import numpy as np
class TestAny(TestCase):
    """Tests for any()/sometrue() truth-testing over arrays."""
    def test_basic(self):
        y1 = [0, 0, 1, 0]
        y2 = [0, 0, 0, 0]
        y3 = [1, 0, 1, 0]
        assert(any(y1))
        assert(any(y3))
        assert(not any(y2))
    def test_nd(self):
        # axis=0 reduces down columns, axis=1 across rows.
        y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
        assert(any(y1))
        assert_array_equal(sometrue(y1, axis=0), [1, 1, 0])
        assert_array_equal(sometrue(y1, axis=1), [0, 1, 1])
class TestAll(TestCase):
    """Tests for all()/alltrue() truth-testing over arrays."""
    def test_basic(self):
        y1 = [0, 1, 1, 0]
        y2 = [0, 0, 0, 0]
        y3 = [1, 1, 1, 1]
        assert(not all(y1))
        assert(all(y3))
        assert(not all(y2))
        assert(all(~array(y2)))
    def test_nd(self):
        y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
        assert(not all(y1))
        assert_array_equal(alltrue(y1, axis=0), [0, 0, 1])
        assert_array_equal(alltrue(y1, axis=1), [0, 0, 1])
class TestAverage(TestCase):
    """Tests for average(): plain means, weighted means, and the
    (average, sum_of_weights) pair returned with returned=True."""
    def test_basic(self):
        y1 = array([1, 2, 3])
        assert(average(y1, axis=0) == 2.)
        y2 = array([1., 2., 3.])
        assert(average(y2, axis=0) == 2.)
        y3 = [0., 0., 0.]
        assert(average(y3, axis=0) == 0.)
        y4 = ones((4, 4))
        y4[0, 1] = 0
        y4[1, 0] = 2
        assert_almost_equal(y4.mean(0), average(y4, 0))
        assert_almost_equal(y4.mean(1), average(y4, 1))
        y5 = rand(5, 5)
        assert_almost_equal(y5.mean(0), average(y5, 0))
        assert_almost_equal(y5.mean(1), average(y5, 1))
        y6 = matrix(rand(5, 5))
        assert_array_equal(y6.mean(0), average(y6, 0))
    def test_weights(self):
        y = arange(10)
        w = arange(10)
        actual = average(y, weights=w)
        desired = (arange(10) ** 2).sum()*1. / arange(10).sum()
        assert_almost_equal(actual, desired)
        y1 = array([[1, 2, 3], [4, 5, 6]])
        w0 = [1, 2]
        actual = average(y1, weights=w0, axis=0)
        desired = array([3., 4., 5.])
        assert_almost_equal(actual, desired)
        w1 = [0, 0, 1]
        actual = average(y1, weights=w1, axis=1)
        desired = array([3., 6.])
        assert_almost_equal(actual, desired)
        # This should raise an error. Can we test for that ?
        # assert_equal(average(y1, weights=w1), 9./2.)
        # 2D Case
        w2 = [[0, 0, 1], [0, 0, 2]]
        desired = array([3., 6.])
        assert_array_equal(average(y1, weights=w2, axis=1), desired)
        assert_equal(average(y1, weights=w2), 5.)
    def test_returned(self):
        y = array([[1, 2, 3], [4, 5, 6]])
        # No weights
        avg, scl = average(y, returned=True)
        assert_equal(scl, 6.)
        avg, scl = average(y, 0, returned=True)
        assert_array_equal(scl, array([2., 2., 2.]))
        avg, scl = average(y, 1, returned=True)
        assert_array_equal(scl, array([3., 3.]))
        # With weights
        w0 = [1, 2]
        avg, scl = average(y, weights=w0, axis=0, returned=True)
        assert_array_equal(scl, array([3., 3., 3.]))
        w1 = [1, 2, 3]
        avg, scl = average(y, weights=w1, axis=1, returned=True)
        assert_array_equal(scl, array([6., 6.]))
        w2 = [[0, 0, 1], [1, 2, 3]]
        avg, scl = average(y, weights=w2, axis=1, returned=True)
        assert_array_equal(scl, array([1., 6.]))
class TestSelect(TestCase):
    """Tests for select() against a simple reference implementation."""
    def _select(self, cond, values, default=0):
        # Reference implementation: per index, collect values of all
        # matching conditions, or the default when none match.
        output = []
        for m in range(len(cond)):
            output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
        return output
    def test_basic(self):
        choices = [array([1, 2, 3]),
                   array([4, 5, 6]),
                   array([7, 8, 9])]
        conditions = [array([0, 0, 0]),
                      array([0, 1, 0]),
                      array([0, 0, 1])]
        assert_array_equal(select(conditions, choices, default=15),
                           self._select(conditions, choices, default=15))
        assert_equal(len(choices), 3)
        assert_equal(len(conditions), 3)
class TestInsert(TestCase):
    """Tests for insert() with scalar and sequence indices."""
    def test_basic(self):
        a = [1, 2, 3]
        assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
        assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
        assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
class TestAmax(TestCase):
    """Tests for amax() on flat and 2-D inputs."""
    def test_basic(self):
        a = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(amax(a), 10.0)
        b = [[3, 6.0, 9.0],
             [4, 10.0, 5.0],
             [8, 3.0, 2.0]]
        assert_equal(amax(b, axis=0), [8.0, 10.0, 9.0])
        assert_equal(amax(b, axis=1), [9.0, 10.0, 8.0])
class TestAmin(TestCase):
    """Tests for amin() on flat and 2-D inputs."""
    def test_basic(self):
        a = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(amin(a), -5.0)
        b = [[3, 6.0, 9.0],
             [4, 10.0, 5.0],
             [8, 3.0, 2.0]]
        assert_equal(amin(b, axis=0), [3.0, 3.0, 2.0])
        assert_equal(amin(b, axis=1), [3.0, 4.0, 2.0])
class TestPtp(TestCase):
    """Tests for ptp() (peak-to-peak, max - min)."""
    def test_basic(self):
        a = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(ptp(a, axis=0), 15.0)
        b = [[3, 6.0, 9.0],
             [4, 10.0, 5.0],
             [8, 3.0, 2.0]]
        assert_equal(ptp(b, axis=0), [5.0, 7.0, 7.0])
        assert_equal(ptp(b, axis= -1), [6.0, 6.0, 6.0])
class TestCumsum(TestCase):
    """Tests for cumsum() across dtypes and axes."""
    def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [int8, uint8, int16, uint16, int32, uint32,
                      float32, float64, complex64, complex128]:
            a = array(ba, ctype)
            a2 = array(ba2, ctype)
            assert_array_equal(cumsum(a, axis=0), array([1, 3, 13, 24, 30, 35, 39], ctype))
            assert_array_equal(cumsum(a2, axis=0), array([[1, 2, 3, 4], [6, 8, 10, 13],
                                                          [16, 11, 14, 18]], ctype))
            assert_array_equal(cumsum(a2, axis=1),
                               array([[1, 3, 6, 10],
                                      [5, 11, 18, 27],
                                      [10, 13, 17, 22]], ctype))
class TestProd(TestCase):
    """Tests for prod() across dtypes and axes."""
    def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [int16, uint16, int32, uint32,
                      float32, float64, complex64, complex128]:
            a = array(ba, ctype)
            a2 = array(ba2, ctype)
            # NOTE(review): ctype is a type object and never equals the
            # strings '1'/'b', so this branch looks unreachable (old
            # typecode leftovers?) -- confirm intent.
            if ctype in ['1', 'b']:
                self.assertRaises(ArithmeticError, prod, a)
                self.assertRaises(ArithmeticError, prod, a2, 1)
                self.assertRaises(ArithmeticError, prod, a)
            else:
                assert_equal(prod(a, axis=0), 26400)
                assert_array_equal(prod(a2, axis=0),
                                   array([50, 36, 84, 180], ctype))
                assert_array_equal(prod(a2, axis= -1), array([24, 1890, 600], ctype))
class TestCumprod(TestCase):
    """Tests for cumprod() across dtypes and axes."""
    def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [int16, uint16, int32, uint32,
                      float32, float64, complex64, complex128]:
            a = array(ba, ctype)
            a2 = array(ba2, ctype)
            # NOTE(review): ctype is a type object and never equals the
            # strings '1'/'b', so this branch looks unreachable (old
            # typecode leftovers?) -- confirm intent.
            if ctype in ['1', 'b']:
                self.assertRaises(ArithmeticError, cumprod, a)
                self.assertRaises(ArithmeticError, cumprod, a2, 1)
                self.assertRaises(ArithmeticError, cumprod, a)
            else:
                assert_array_equal(cumprod(a, axis= -1),
                                   array([1, 2, 20, 220,
                                          1320, 6600, 26400], ctype))
                assert_array_equal(cumprod(a2, axis=0),
                                   array([[ 1, 2, 3, 4],
                                          [ 5, 12, 21, 36],
                                          [50, 36, 84, 180]], ctype))
                assert_array_equal(cumprod(a2, axis= -1),
                                   array([[ 1, 2, 6, 24],
                                          [ 5, 30, 210, 1890],
                                          [10, 30, 120, 600]], ctype))
class TestDiff(TestCase):
    """Tests for diff() (n-th discrete difference) in 1-D and n-D."""
    def test_basic(self):
        x = [1, 4, 6, 7, 12]
        out = array([3, 2, 1, 5])
        out2 = array([-1, -1, 4])
        out3 = array([0, 5])
        assert_array_equal(diff(x), out)
        assert_array_equal(diff(x, n=2), out2)
        assert_array_equal(diff(x, n=3), out3)
    def test_nd(self):
        x = 20 * rand(10, 20, 30)
        out1 = x[:, :, 1:] - x[:, :, :-1]
        out2 = out1[:, :, 1:] - out1[:, :, :-1]
        out3 = x[1:, :, :] - x[:-1, :, :]
        out4 = out3[1:, :, :] - out3[:-1, :, :]
        assert_array_equal(diff(x), out1)
        assert_array_equal(diff(x, n=2), out2)
        assert_array_equal(diff(x, axis=0), out3)
        assert_array_equal(diff(x, n=2, axis=0), out4)
class TestGradient(TestCase):
    """Tests for gradient(): values, argument checking and subclasses."""
    def test_basic(self):
        x = array([[1, 1], [3, 4]])
        dx = [array([[2., 3.], [2., 3.]]),
              array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x), dx)
    def test_badargs(self):
        # for 2D array, gradient can take 0,1, or 2 extra args
        x = array([[1, 1], [3, 4]])
        assert_raises(SyntaxError, gradient, x, array([1., 1.]),
                      array([1., 1.]), array([1., 1.]))
    def test_masked(self):
        # Make sure that gradient supports subclasses like masked arrays
        x = np.ma.array([[1, 1], [3, 4]])
        assert_equal(type(gradient(x)[0]), type(x))
class TestAngle(TestCase):
    """Tests for angle() in radians and degrees."""
    def test_basic(self):
        x = [1 + 3j, sqrt(2) / 2.0 + 1j * sqrt(2) / 2, 1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
        y = angle(x)
        yo = [arctan(3.0 / 1.0), arctan(1.0), 0, pi / 2, pi, -pi / 2.0,
              - arctan(3.0 / 1.0), pi - arctan(3.0 / 1.0)]
        z = angle(x, deg=1)
        zo = array(yo) * 180 / pi
        assert_array_almost_equal(y, yo, 11)
        assert_array_almost_equal(z, zo, 11)
class TestTrimZeros(TestCase):
    """ only testing for integer splits.
    """
    def test_basic(self):
        a = array([0, 0, 1, 2, 3, 4, 0])
        res = trim_zeros(a)
        assert_array_equal(res, array([1, 2, 3, 4]))
    def test_leading_skip(self):
        # interior zeros must be preserved, only the edges are trimmed.
        a = array([0, 0, 1, 0, 2, 3, 4, 0])
        res = trim_zeros(a)
        assert_array_equal(res, array([1, 0, 2, 3, 4]))
    def test_trailing_skip(self):
        a = array([0, 0, 1, 0, 2, 3, 0, 4, 0])
        res = trim_zeros(a)
        assert_array_equal(res, array([1, 0, 2, 3, 0, 4]))
class TestExtins(TestCase):
    """Tests for extract() and place(), including round-tripping."""
    def test_basic(self):
        a = array([1, 3, 2, 1, 2, 3, 3])
        b = extract(a > 1, a)
        assert_array_equal(b, [3, 2, 2, 3, 3])
    def test_place(self):
        a = array([1, 4, 3, 2, 5, 8, 7])
        place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
        assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])
    def test_both(self):
        # extract + place with the same mask must round-trip the array.
        a = rand(10)
        mask = a > 0.5
        ac = a.copy()
        c = extract(mask, a)
        place(a, mask, 0)
        place(a, mask, c)
        assert_array_equal(a, ac)
class TestVectorize(TestCase):
    """Tests for vectorize(): scalars, arrays, ufunc parity and keywords."""
    def test_simple(self):
        def addsubtract(a, b):
            if a > b:
                return a - b
            else:
                return a + b
        f = vectorize(addsubtract)
        r = f([0, 3, 6, 9], [1, 3, 5, 7])
        assert_array_equal(r, [1, 6, 1, 2])
    def test_scalar(self):
        def addsubtract(a, b):
            if a > b:
                return a - b
            else:
                return a + b
        f = vectorize(addsubtract)
        r = f([0, 3, 6, 9], 5)
        assert_array_equal(r, [5, 8, 1, 4])
    def test_large(self):
        x = linspace(-3, 2, 10000)
        f = vectorize(lambda x: x)
        y = f(x)
        assert_array_equal(y, x)
    def test_ufunc(self):
        import math
        f = vectorize(math.cos)
        args = array([0, 0.5*pi, pi, 1.5*pi, 2*pi])
        r1 = f(args)
        r2 = cos(args)
        assert_array_equal(r1, r2)
    def test_keywords(self):
        import math
        def foo(a, b=1):
            return a + b
        f = vectorize(foo)
        args = array([1,2,3])
        r1 = f(args)
        r2 = array([2,3,4])
        assert_array_equal(r1, r2)
        r1 = f(args, 2)
        r2 = array([3,4,5])
        assert_array_equal(r1, r2)
    def test_keywords_no_func_code(self):
        # This needs to test a function that has keywords but
        # no func_code attribute, since otherwise vectorize will
        # inspect the func_code.
        import random
        try:
            f = vectorize(random.randrange)
        except:
            raise AssertionError()
class TestDigitize(TestCase):
    """Tests for digitize() with increasing and decreasing bins."""
    def test_forward(self):
        x = arange(-6, 5)
        bins = arange(-5, 5)
        assert_array_equal(digitize(x, bins), arange(11))
    def test_reverse(self):
        x = arange(5, -6, -1)
        bins = arange(5, -5, -1)
        assert_array_equal(digitize(x, bins), arange(11))
    def test_random(self):
        # bins spanning [min, max] of the data: no value may fall below
        # the first edge (index 0).
        x = rand(10)
        bin = linspace(x.min(), x.max(), 10)
        assert all(digitize(x, bin) != 0)
class TestUnwrap(TestCase):
    """Tests for unwrap() phase-jump removal."""
    def test_simple(self):
        #check that unwrap removes jumps greater than 2*pi
        assert_array_equal(unwrap([1, 1 + 2 * pi]), [1, 1])
        #check that unwrap maintains continuity
        assert(all(diff(unwrap(rand(10) * 100)) < pi))
class TestFilterwindows(TestCase):
    """Tests for the window functions: symmetry plus known window sums."""
    def test_hanning(self):
        #check symmetry
        w = hanning(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(sum(w, axis=0), 4.500, 4)
    def test_hamming(self):
        #check symmetry
        w = hamming(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(sum(w, axis=0), 4.9400, 4)
    def test_bartlett(self):
        #check symmetry
        w = bartlett(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(sum(w, axis=0), 4.4444, 4)
    def test_blackman(self):
        #check symmetry
        w = blackman(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(sum(w, axis=0), 3.7800, 4)
class TestTrapz(TestCase):
    """Tests for trapz(): 1-D, n-D, masked-array and matrix inputs."""
    def test_simple(self):
        r = trapz(exp(-1.0 / 2 * (arange(-10, 10, .1)) ** 2) / sqrt(2 * pi), dx=0.1)
        #check integral of normal equals 1
        assert_almost_equal(sum(r, axis=0), 1, 7)
    def test_ndim(self):
        x = linspace(0, 1, 3)
        y = linspace(0, 2, 8)
        z = linspace(0, 3, 13)
        # trapezoid weights: full spacing inside, half at the endpoints.
        wx = ones_like(x) * (x[1] - x[0])
        wx[0] /= 2
        wx[-1] /= 2
        wy = ones_like(y) * (y[1] - y[0])
        wy[0] /= 2
        wy[-1] /= 2
        wz = ones_like(z) * (z[1] - z[0])
        wz[0] /= 2
        wz[-1] /= 2
        q = x[:, None, None] + y[None, :, None] + z[None, None, :]
        qx = (q * wx[:, None, None]).sum(axis=0)
        qy = (q * wy[None, :, None]).sum(axis=1)
        qz = (q * wz[None, None, :]).sum(axis=2)
        # n-d `x`
        r = trapz(q, x=x[:, None, None], axis=0)
        assert_almost_equal(r, qx)
        r = trapz(q, x=y[None, :, None], axis=1)
        assert_almost_equal(r, qy)
        r = trapz(q, x=z[None, None, :], axis=2)
        assert_almost_equal(r, qz)
        # 1-d `x`
        r = trapz(q, x=x, axis=0)
        assert_almost_equal(r, qx)
        r = trapz(q, x=y, axis=1)
        assert_almost_equal(r, qy)
        r = trapz(q, x=z, axis=2)
        assert_almost_equal(r, qz)
    def test_masked(self):
        #Testing that masked arrays behave as if the function is 0 where
        #masked
        x = arange(5)
        y = x * x
        mask = x == 2
        ym = np.ma.array(y, mask=mask)
        r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
        assert_almost_equal(trapz(ym, x), r)
        xm = np.ma.array(x, mask=mask)
        assert_almost_equal(trapz(ym, xm), r)
        xm = np.ma.array(x, mask=mask)
        assert_almost_equal(trapz(y, xm), r)
    def test_matrix(self):
        #Test to make sure matrices give the same answer as ndarrays
        x = linspace(0, 5)
        y = x * x
        r = trapz(y, x)
        mx = matrix(x)
        my = matrix(y)
        mr = trapz(my, mx)
        assert_almost_equal(mr, r)
class TestSinc(TestCase):
    """Tests for sinc(): value at 0, symmetry and array-like inputs."""
    def test_simple(self):
        assert(sinc(0) == 1)
        w = sinc(linspace(-1, 1, 100))
        #check symmetry
        assert_array_almost_equal(w, flipud(w), 7)
    def test_array_like(self):
        x = [0, 0.5]
        y1 = sinc(array(x))
        y2 = sinc(list(x))
        y3 = sinc(tuple(x))
        assert_array_equal(y1, y2)
        assert_array_equal(y1, y3)
class TestHistogram(TestCase):
    """Tests for histogram(): counts, normalization, outliers, result
    dtypes and weights."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_simple(self):
        n = 100
        v = rand(n)
        (a, b) = histogram(v)
        #check if the sum of the bins equals the number of samples
        assert_equal(sum(a, axis=0), n)
        #check that the bin counts are evenly spaced when the data is from a
        # linear function
        (a, b) = histogram(linspace(0, 10, 100))
        assert_array_equal(a, 10)
    def test_one_bin(self):
        # Ticket 632
        hist, edges = histogram([1, 2, 3, 4], [1, 2])
        assert_array_equal(hist, [2, ])
        assert_array_equal(edges, [1, 2])
    def test_normed(self):
        # Check that the integral of the density equals 1.
        n = 100
        v = rand(n)
        a, b = histogram(v, normed=True)
        area = sum(a * diff(b))
        assert_almost_equal(area, 1)
        # NOTE(review): `warnings` is not among this file's visible
        # imports; presumably re-exported by a star import -- verify.
        warnings.filterwarnings('ignore',
                message="\s*This release of NumPy fixes a normalization bug")
        # Check with non-constant bin widths
        v = np.arange(10)
        bins = [0,1,3,6,10]
        a, b = histogram(v, bins, normed=True)
        assert_array_equal(a, .1)
        assert_equal(sum(a*diff(b)), 1)
        # Variale bin widths are especially useful to deal with
        # infinities.
        v = np.arange(10)
        bins = [0,1,3,6,np.inf]
        a, b = histogram(v, bins, normed=True)
        assert_array_equal(a, [.1,.1,.1,0.])
        # Taken from a bug report from N. Becker on the numpy-discussion
        # mailing list Aug. 6, 2010.
        counts, dmy = np.histogram([1,2,3,4], [0.5,1.5,np.inf], normed=True)
        assert_equal(counts, [.25, 0])
        warnings.filters.pop(0)
    def test_outliers(self):
        # Check that outliers are not tallied
        a = arange(10) + .5
        # Lower outliers
        h, b = histogram(a, range=[0, 9])
        assert_equal(h.sum(), 9)
        # Upper outliers
        h, b = histogram(a, range=[1, 10])
        assert_equal(h.sum(), 9)
        # Normalization
        h, b = histogram(a, range=[1, 9], normed=True)
        assert_equal((h * diff(b)).sum(), 1)
        # Weights
        w = arange(10) + .5
        h, b = histogram(a, range=[1, 9], weights=w, normed=True)
        assert_equal((h * diff(b)).sum(), 1)
        h, b = histogram(a, bins=8, range=[1, 9], weights=w)
        assert_equal(h, w[1:-1])
    def test_type(self):
        # Check the type of the returned histogram
        a = arange(10) + .5
        h, b = histogram(a)
        assert(issubdtype(h.dtype, int))
        h, b = histogram(a, normed=True)
        assert(issubdtype(h.dtype, float))
        h, b = histogram(a, weights=ones(10, int))
        assert(issubdtype(h.dtype, int))
        h, b = histogram(a, weights=ones(10, float))
        assert(issubdtype(h.dtype, float))
    def test_weights(self):
        v = rand(100)
        w = ones(100) * 5
        a, b = histogram(v)
        na, nb = histogram(v, normed=True)
        wa, wb = histogram(v, weights=w)
        nwa, nwb = histogram(v, weights=w, normed=True)
        assert_array_almost_equal(a * 5, wa)
        assert_array_almost_equal(na, nwa)
        # Check weights are properly applied.
        v = linspace(0, 10, 10)
        w = concatenate((zeros(5), ones(5)))
        wa, wb = histogram(v, bins=arange(11), weights=w)
        assert_array_almost_equal(wa, w)
        # Check with integer weights
        wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
        assert_array_equal(wa, [4, 5, 0, 1])
        wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True)
        assert_array_almost_equal(wa, array([4, 5, 0, 1]) / 10. / 3. * 4)
        warnings.filterwarnings('ignore', \
            message="\s*This release of NumPy fixes a normalization bug")
        # Check weights with non-uniform bin widths
        a,b = histogram(np.arange(9), [0,1,3,6,10], \
            weights=[2,1,1,1,1,1,1,1,1], normed=True)
        assert_almost_equal(a, [.2, .1, .1, .075])
        warnings.filters.pop(0)
class TestHistogramdd(TestCase):
    """Tests for histogramdd(): counts, normalization, output shapes
    and weights."""
    def test_simple(self):
        x = array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], \
            [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
        H, edges = histogramdd(x, (2, 3, 3), range=[[-1, 1], [0, 3], [0, 3]])
        answer = asarray([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], [[0, 1, 0], [0, 0, 1],
            [0, 0, 1]]])
        assert_array_equal(H, answer)
        # Check normalization
        ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
        H, edges = histogramdd(x, bins=ed, normed=True)
        assert(all(H == answer / 12.))
        # Check that H has the correct shape.
        H, edges = histogramdd(x, (2, 3, 4), range=[[-1, 1], [0, 3], [0, 4]],
                               normed=True)
        answer = asarray([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], [[0, 1, 0, 0],
            [0, 0, 1, 0], [0, 0, 1, 0]]])
        assert_array_almost_equal(H, answer / 6., 4)
        # Check that a sequence of arrays is accepted and H has the correct
        # shape.
        z = [squeeze(y) for y in split(x, 3, axis=1)]
        H, edges = histogramdd(z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
        answer = asarray([[[0, 0], [0, 0], [0, 0]],
                          [[0, 1], [0, 0], [1, 0]],
                          [[0, 1], [0, 0], [0, 0]],
                          [[0, 0], [0, 0], [0, 0]]])
        assert_array_equal(H, answer)
        Z = zeros((5, 5, 5))
        Z[range(5), range(5), range(5)] = 1.
        H, edges = histogramdd([arange(5), arange(5), arange(5)], 5)
        assert_array_equal(H, Z)
    def test_shape_3d(self):
        # All possible permutations for bins of different lengths in 3D.
        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
                (4, 5, 6))
        r = rand(10, 3)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert(H.shape == b)
    def test_shape_4d(self):
        # All possible permutations for bins of different lengths in 4D.
        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
                (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
                (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
                (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
                (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
                (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
        r = rand(10, 4)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert(H.shape == b)
    def test_weights(self):
        v = rand(100, 2)
        hist, edges = histogramdd(v)
        n_hist, edges = histogramdd(v, normed=True)
        w_hist, edges = histogramdd(v, weights=ones(100))
        assert_array_equal(w_hist, hist)
        w_hist, edges = histogramdd(v, weights=ones(100) * 2, normed=True)
        assert_array_equal(w_hist, n_hist)
        w_hist, edges = histogramdd(v, weights=ones(100, int) * 2)
        assert_array_equal(w_hist, 2 * hist)
    def test_identical_samples(self):
        x = zeros((10, 2), int)
        hist, edges = histogramdd(x, bins=2)
        assert_array_equal(edges[0], array([-0.5, 0. , 0.5]))
class TestUnique(TestCase):
    """Tests for ``unique`` on integer, string and complex inputs."""

    def test_simple(self):
        # Integers: duplicates removed, result sorted ascending.
        ints = array([4, 3, 2, 1, 1, 2, 3, 4, 0])
        assert all(unique(ints) == [0, 1, 2, 3, 4])
        assert unique(array([1, 1, 1, 1, 1])) == array([1])
        # Strings sort lexicographically.
        words = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
        assert all(unique(words) == ['bar', 'foo', 'ham', 'widget'])
        # Complex values sort by real part, then by imaginary part.
        cplx = array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
        assert all(unique(cplx) == [1 + 1j, 1 + 10j, 5 + 6j, 10])
class TestCheckFinite(TestCase):
    """Tests for ``numpy.lib.asarray_chkfinite``."""

    def test_simple(self):
        finite = [1, 2, 3]
        with_inf = [1, 2, inf]
        with_nan = [1, 2, nan]
        # A fully finite input converts without complaint...
        numpy.lib.asarray_chkfinite(finite)
        # ...while inf or nan entries must raise ValueError.
        assert_raises(ValueError, numpy.lib.asarray_chkfinite, with_inf)
        assert_raises(ValueError, numpy.lib.asarray_chkfinite, with_nan)
class TestNaNFuncts(TestCase):
    """Tests for the nan-ignoring reductions (nansum, nanmin, nanmax,
    nanargmin) against a fixed 3x3x3 reference array."""

    def setUp(self):
        # 3x3x3 fixture with NaNs scattered so that every reduction axis
        # encounters at least one NaN, including one all-NaN lane.
        self.A = array([[[nan, 0.01319214, 0.01620964],
                         [0.11704017, nan, 0.75157887],
                         [0.28333658, 0.1630199, nan]],
                        [[0.59541557, nan, 0.37910852],
                         [nan, 0.87964135, nan],
                         [0.70543747, nan, 0.34306596]],
                        [[0.72687499, 0.91084584, nan],
                         [0.84386844, 0.38944762, 0.23913896],
                         [nan, 0.37068164, 0.33850425]]])

    def test_nansum(self):
        # NaNs are treated as zero: flat total, then per-axis totals.
        assert_almost_equal(nansum(self.A), 8.0664079100000006)
        assert_almost_equal(nansum(self.A, 0),
                            array([[1.32229056, 0.92403798, 0.39531816],
                                   [0.96090861, 1.26908897, 0.99071783],
                                   [0.98877405, 0.53370154, 0.68157021]]))
        assert_almost_equal(nansum(self.A, 1),
                            array([[0.40037675, 0.17621204, 0.76778851],
                                   [1.30085304, 0.87964135, 0.72217448],
                                   [1.57074343, 1.6709751, 0.57764321]]))
        assert_almost_equal(nansum(self.A, 2),
                            array([[0.02940178, 0.86861904, 0.44635648],
                                   [0.97452409, 0.87964135, 1.04850343],
                                   [1.63772083, 1.47245502, 0.70918589]]))

    def test_nanmin(self):
        # NaNs are ignored: global minimum, then per-axis minima.
        assert_almost_equal(nanmin(self.A), 0.01319214)
        assert_almost_equal(nanmin(self.A, 0),
                            array([[0.59541557, 0.01319214, 0.01620964],
                                   [0.11704017, 0.38944762, 0.23913896],
                                   [0.28333658, 0.1630199, 0.33850425]]))
        assert_almost_equal(nanmin(self.A, 1),
                            array([[0.11704017, 0.01319214, 0.01620964],
                                   [0.59541557, 0.87964135, 0.34306596],
                                   [0.72687499, 0.37068164, 0.23913896]]))
        assert_almost_equal(nanmin(self.A, 2),
                            array([[0.01319214, 0.11704017, 0.1630199],
                                   [0.37910852, 0.87964135, 0.34306596],
                                   [0.72687499, 0.23913896, 0.33850425]]))
        # NOTE(review): this asserts *identity* (``is``) with the module's
        # ``nan`` object, not just NaN-ness -- it relies on nanmin
        # returning the very object it was given; confirm against the
        # numpy version in use.
        assert nanmin([nan, nan]) is nan

    def test_nanargmin(self):
        # Index of the minimum with NaNs ignored; flat index first.
        assert_almost_equal(nanargmin(self.A), 1)
        assert_almost_equal(nanargmin(self.A, 0),
                            array([[1, 0, 0],
                                   [0, 2, 2],
                                   [0, 0, 2]]))
        assert_almost_equal(nanargmin(self.A, 1),
                            array([[1, 0, 0],
                                   [0, 1, 2],
                                   [0, 2, 1]]))
        assert_almost_equal(nanargmin(self.A, 2),
                            array([[1, 0, 1],
                                   [2, 1, 2],
                                   [0, 2, 2]]))

    def test_nanmax(self):
        # Global maximum, then per-axis maxima, NaNs ignored.
        assert_almost_equal(nanmax(self.A), 0.91084584000000002)
        assert_almost_equal(nanmax(self.A, 0),
                            array([[0.72687499, 0.91084584, 0.37910852],
                                   [0.84386844, 0.87964135, 0.75157887],
                                   [0.70543747, 0.37068164, 0.34306596]]))
        assert_almost_equal(nanmax(self.A, 1),
                            array([[0.28333658, 0.1630199, 0.75157887],
                                   [0.70543747, 0.87964135, 0.37910852],
                                   [0.84386844, 0.91084584, 0.33850425]]))
        assert_almost_equal(nanmax(self.A, 2),
                            array([[0.01620964, 0.75157887, 0.28333658],
                                   [0.59541557, 0.87964135, 0.70543747],
                                   [0.91084584, 0.84386844, 0.37068164]]))

    def test_nanmin_allnan_on_axis(self):
        # An all-NaN lane reduces to NaN rather than raising.
        assert_array_equal(isnan(nanmin([[nan] * 2] * 3, axis=1)),
                           [True, True, True])

    def test_nanmin_masked(self):
        # nanmin on a masked array must not modify the mask in place.
        a = np.ma.fix_invalid([[2, 1, 3, nan], [5, 2, 3, nan]])
        ctrl_mask = a._mask.copy()
        test = np.nanmin(a, axis=1)
        assert_equal(test, [1, 2])
        assert_equal(a._mask, ctrl_mask)
        assert_equal(np.isinf(a), np.zeros((2, 4), dtype=bool))
class TestNanFunctsIntTypes(TestCase):
    """The nan* reductions must agree with the plain reductions on
    integer arrays, which can never contain NaN."""

    int_types = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)

    def setUp(self, *args, **kwargs):
        # 127 is chosen so the data also fits in int8.
        self.A = array([127, 39, 93, 87, 46])

    def integer_arrays(self):
        # The reference data recast to every supported integer dtype.
        for dtype in self.int_types:
            yield self.A.astype(dtype)

    def test_nanmin(self):
        expected = min(self.A)
        for arr in self.integer_arrays():
            assert_equal(nanmin(arr), expected)

    def test_nanmax(self):
        expected = max(self.A)
        for arr in self.integer_arrays():
            assert_equal(nanmax(arr), expected)

    def test_nanargmin(self):
        expected = argmin(self.A)
        for arr in self.integer_arrays():
            assert_equal(nanargmin(arr), expected)

    def test_nanargmax(self):
        expected = argmax(self.A)
        for arr in self.integer_arrays():
            assert_equal(nanargmax(arr), expected)
class TestCorrCoef(TestCase):
    """Tests for ``corrcoef`` against precomputed reference matrices."""

    # Two 3x3 input blocks...
    A = array([[0.15391142, 0.18045767, 0.14197213],
               [0.70461506, 0.96474128, 0.27906989],
               [0.9297531, 0.32296769, 0.19267156]])
    B = array([[0.10377691, 0.5417086, 0.49807457],
               [0.82872117, 0.77801674, 0.39226705],
               [0.9314666, 0.66800209, 0.03538394]])
    # ...res1 is corrcoef(A) alone (3x3), res2 is corrcoef of A stacked
    # with B (6x6, rows of A first).
    res1 = array([[1., 0.9379533, -0.04931983],
                  [0.9379533, 1., 0.30007991],
                  [-0.04931983, 0.30007991, 1.]])
    res2 = array([[1., 0.9379533, -0.04931983,
                   0.30151751, 0.66318558, 0.51532523],
                  [0.9379533, 1., 0.30007991,
                   -0.04781421, 0.88157256, 0.78052386],
                  [-0.04931983, 0.30007991, 1.,
                   -0.96717111, 0.71483595, 0.83053601],
                  [0.30151751, -0.04781421, -0.96717111,
                   1., -0.51366032, -0.66173113],
                  [0.66318558, 0.88157256, 0.71483595,
                   -0.51366032, 1., 0.98317823],
                  [0.51532523, 0.78052386, 0.83053601,
                   -0.66173113, 0.98317823, 1.]])

    def test_simple(self):
        assert_almost_equal(corrcoef(self.A), self.res1)
        assert_almost_equal(corrcoef(self.A, self.B), self.res2)

    def test_ddof(self):
        # ddof must not change the correlation matrix (it cancels in the
        # normalisation); numpy later deprecated the keyword entirely.
        assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
        assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
class Test_i0(TestCase):
    """Tests for ``i0``, the modified Bessel function of order zero."""

    def test_simple(self):
        # Scalar input yields a 0-d array.
        assert_almost_equal(i0(0.5), array(1.0634833707413234))
        # 1-D input is mapped elementwise.
        A = array([0.49842636, 0.6969809, 0.22011976, 0.0155549])
        assert_almost_equal(i0(A),
                            array([1.06307822, 1.12518299, 1.01214991, 1.00006049]))
        # 2-D input preserves shape.
        B = array([[0.827002, 0.99959078],
                   [0.89694769, 0.39298162],
                   [0.37954418, 0.05206293],
                   [0.36465447, 0.72446427],
                   [0.48164949, 0.50324519]])
        assert_almost_equal(i0(B),
                            array([[1.17843223, 1.26583466],
                                   [1.21147086, 1.0389829],
                                   [1.03633899, 1.00067775],
                                   [1.03352052, 1.13557954],
                                   [1.0588429, 1.06432317]]))
class TestKaiser(TestCase):
    """Tests for the Kaiser window generator."""

    def test_simple(self):
        # Degenerate sizes: an empty window, then a single finite sample.
        assert_almost_equal(kaiser(0, 1.0), array([]))
        assert isfinite(kaiser(1, 1.0))
        # Small windows are symmetric and peak at the centre.
        assert_almost_equal(kaiser(2, 1.0),
                            array([0.78984831, 0.78984831]))
        assert_almost_equal(
            kaiser(5, 1.0),
            array([0.78984831, 0.94503323, 1., 0.94503323, 0.78984831]))
        assert_almost_equal(
            kaiser(5, 1.56789),
            array([0.58285404, 0.88409679, 1., 0.88409679, 0.58285404]))

    def test_int_beta(self):
        # An integer beta must be accepted just like a float.
        kaiser(3, 4)
class TestMsort(TestCase):
    # NOTE(review): ``msort(a)`` is equivalent to ``sort(a, axis=0)``; it
    # was deprecated and later removed from numpy (2.0) -- confirm
    # availability before running against a modern numpy.
    def test_simple(self):
        """msort sorts each column independently (sort along axis 0)."""
        A = array([[0.44567325, 0.79115165, 0.5490053],
                   [0.36844147, 0.37325583, 0.96098397],
                   [0.64864341, 0.52929049, 0.39172155]])
        assert_almost_equal(msort(A),
                            array([[0.36844147, 0.37325583, 0.39172155],
                                   [0.44567325, 0.52929049, 0.5490053],
                                   [0.64864341, 0.79115165, 0.96098397]]))
class TestMeshgrid(TestCase):
    """Tests for ``meshgrid`` with the default 'xy' indexing."""

    def test_simple(self):
        X, Y = meshgrid([1, 2, 3], [4, 5, 6, 7])
        # X repeats the first input along every row...
        expected_x = array([[1, 2, 3]] * 4)
        assert all(X == expected_x)
        # ...and Y repeats the second input down every column.
        expected_y = array([[4] * 3, [5] * 3, [6] * 3, [7] * 3])
        assert all(Y == expected_y)
class TestPiecewise(TestCase):
    """Tests for ``piecewise`` condition-list handling."""

    def test_simple(self):
        # A bare boolean list acts as a single condition...
        assert_array_equal(piecewise([0, 0], [True, False], [1]), [1, 0])
        # ...and the same condition wrapped in an outer list is identical.
        assert_array_equal(piecewise([0, 0], [[True, False]], [1]), [1, 0])
        # Boolean and integer arrays are accepted in either form too.
        assert_array_equal(piecewise([0, 0], array([True, False]), [1]),
                           [1, 0])
        assert_array_equal(piecewise([0, 0], array([1, 0]), [1]), [1, 0])
        assert_array_equal(piecewise([0, 0], [array([1, 0])], [1]), [1, 0])
        # Callables are evaluated on the selected elements only.
        assert_array_equal(piecewise([0, 0], [[False, True]],
                                     [lambda x: -1]),
                           [0, -1])
        # Two conditions select between two constant values.
        assert_array_equal(piecewise([1, 2],
                                     [[True, False], [False, True]],
                                     [3, 4]),
                           [3, 4])

    def test_default(self):
        # With no default given, unmatched elements stay at zero.
        assert_array_equal(piecewise([1, 2], [True, False], [2]), [2, 0])
        # An extra function acts as the default for unmatched elements.
        assert_array_equal(piecewise([1, 2], [True, False], [2, 3]), [2, 3])

    def test_0d(self):
        scalar = array(3)
        result = piecewise(scalar, scalar > 3, [4, 0])
        # The 0-d shape survives and the default branch is taken.
        assert result.ndim == 0
        assert result == 0
class TestBincount(TestCase):
    """Tests for ``np.bincount`` with and without weights."""

    def test_simple(self):
        # 0..3 each occur exactly once.
        assert_array_equal(np.bincount(np.arange(4)), np.ones(4))

    def test_simple2(self):
        # Gaps (0, 3) count zero; value 1 occurs twice.
        counts = np.bincount(np.array([1, 5, 2, 4, 1]))
        assert_array_equal(counts, np.array([0, 2, 1, 0, 1, 1]))

    def test_simple_weight(self):
        values = np.arange(4)
        weights = np.array([0.2, 0.3, 0.5, 0.1])
        # Each value occurs once, so the result is simply the weights.
        assert_array_equal(np.bincount(values, weights), weights)

    def test_simple_weight2(self):
        values = np.array([1, 2, 4, 5, 2])
        weights = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
        # Repeated values accumulate their weights (bin 2: 0.3 + 0.2).
        assert_array_equal(np.bincount(values, weights),
                           np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
class TestInterp(TestCase):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
assert_raises(ValueError, interp, 0, [0], [1, 2])
def test_basic(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.linspace(0, 1, 50)
assert_almost_equal(np.interp(x0, x, y), x0)
def test_right_left_behavior(self):
assert_equal(interp([-1, 0, 1], [0], [1]), [1,1,1])
assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0,1,1])
assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1,1,0])
assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0,1,0])
def test_scalar_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = 0
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = .3
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float32(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float64(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
def test_zero_dimensional_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.array(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.array(.3, dtype=object)
assert_almost_equal(np.interp(x0, x, y), .3)
def compare_results(res, desired):
    """Assert that each entry of *res* matches the corresponding entry
    of *desired* (iteration is bounded by *desired*'s length)."""
    for index, expected in enumerate(desired):
        assert_array_equal(res[index], expected)
def test_percentile_list():
    """The 0th percentile of a list is its minimum."""
    assert_equal(np.percentile([1, 2, 3], 0), 1)
def test_percentile_out():
    """np.percentile must honour a preallocated ``out`` array."""
    probes = (1, 2, 3)

    # 1-D input: out has one slot per requested percentile.
    vec = np.array([1, 2, 3])
    out = np.zeros((3,))
    np.percentile(vec, probes, out=out)
    assert_equal(out, np.percentile(vec, probes))

    # 2-D input, reduced along either axis.
    mat = np.array([[1, 2, 3],
                    [4, 5, 6]])
    out = np.zeros((3, 3))
    np.percentile(mat, probes, axis=0, out=out)
    assert_equal(out, np.percentile(mat, probes, axis=0))

    out = np.zeros((3, 2))
    np.percentile(mat, probes, axis=1, out=out)
    assert_equal(out, np.percentile(mat, probes, axis=1))
# Allow the suite to be executed directly; ``run_module_suite`` comes
# from numpy.testing via the module-level imports.
if __name__ == "__main__":
    run_module_suite()
| gpl-3.0 |
fabtools/fabtools | fabtools/tests/functional_tests/test_ssh.py | 14 | 1998 | """
Test SSH hardening operations
"""
from textwrap import dedent
import pytest
from fabric.api import quiet
from fabric.contrib.files import contains
from fabtools.require import file as require_file
SSHD_CONFIG = '/tmp/sshd_config'
SSHD_CONFIG_CONTENTS = [
"""
""",
"""
PasswordAuthentication yes
PermitRootLogin yes
""",
"""
PasswordAuthentication yes
PermitRootLogin no
""",
"""
PasswordAuthentication no
PermitRootLogin yes
""",
"""
PasswordAuthentication no
PermitRootLogin no
""",
]
@pytest.fixture(scope='module', params=SSHD_CONFIG_CONTENTS)
def sshd_config(request):
    # Write one candidate config (dedented) to the scratch path;
    # parametrisation re-runs every dependent test once per starting state.
    require_file(SSHD_CONFIG, contents=dedent(request.param))
def test_disable_password_auth(sshd_config):
    """After disabling, only 'PasswordAuthentication no' may remain."""
    from fabtools.ssh import disable_password_auth

    disable_password_auth(sshd_config=SSHD_CONFIG)

    wanted, unwanted = 'PasswordAuthentication no', 'PasswordAuthentication yes'
    with quiet():
        assert contains(SSHD_CONFIG, wanted, exact=True)
        assert not contains(SSHD_CONFIG, unwanted, exact=True)
def test_disable_root_login(sshd_config):
    """After disabling, only 'PermitRootLogin no' may remain."""
    from fabtools.ssh import disable_root_login

    disable_root_login(sshd_config=SSHD_CONFIG)

    wanted, unwanted = 'PermitRootLogin no', 'PermitRootLogin yes'
    with quiet():
        assert contains(SSHD_CONFIG, wanted, exact=True)
        assert not contains(SSHD_CONFIG, unwanted, exact=True)
def test_enable_password_auth(sshd_config):
    """After enabling, only 'PasswordAuthentication yes' may remain."""
    from fabtools.ssh import enable_password_auth

    enable_password_auth(sshd_config=SSHD_CONFIG)

    wanted, unwanted = 'PasswordAuthentication yes', 'PasswordAuthentication no'
    with quiet():
        assert contains(SSHD_CONFIG, wanted, exact=True)
        assert not contains(SSHD_CONFIG, unwanted, exact=True)
def test_enable_root_login(sshd_config):
    """After enabling, only 'PermitRootLogin yes' may remain."""
    from fabtools.ssh import enable_root_login

    enable_root_login(sshd_config=SSHD_CONFIG)

    wanted, unwanted = 'PermitRootLogin yes', 'PermitRootLogin no'
    with quiet():
        assert contains(SSHD_CONFIG, wanted, exact=True)
        assert not contains(SSHD_CONFIG, unwanted, exact=True)
| bsd-2-clause |
edeposit/edeposit.amqp.aleph | setup.py | 1 | 1678 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

from docs import getVersion


# Variables ===================================================================
# NOTE: these files are read at import time; running setup.py outside the
# project root will fail with FileNotFoundError.
changelog = open('CHANGES.rst').read()
long_description = "\n\n".join([
    open('README.rst').read(),
    open('CONTRIBUTORS.rst').read(),
    changelog
])


# Package definitions =========================================================
setup(
    name='edeposit.amqp.aleph',
    # The version string is parsed out of the changelog by docs.getVersion.
    version=getVersion(changelog),
    description="E-Deposit AMQP module providing communication with Aleph",
    long_description=long_description,
    url='https://github.com/edeposit/edeposit.amqp.aleph',

    author='Edeposit team',
    author_email='edeposit@email.cz',

    classifiers=[
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    license='MIT',

    # Sources live under src/ (src-layout).
    packages=find_packages('src'),
    package_dir={'': 'src'},

    # edeposit / edeposit.amqp are shared namespace packages.
    namespace_packages=[
        'edeposit',
        'edeposit.amqp'
    ],
    include_package_data=True,
    zip_safe=False,

    install_requires=[
        'setuptools',
        "pyDHTMLParser>=2.0.7",
        "httpkie>=1.1.0",
        "isbn_validator",
        "remove_hairs",
        "marcxml_parser>=1.1.7",
    ],
    extras_require={
        "test": [
            "unittest2",
            "robotsuite",
            "pytest",
            "mock",
            "robotframework-httplibrary"
        ],
        "docs": [
            "sphinxcontrib-robotdoc",
            "sphinxcontrib-napoleon",
            "sphinx",
        ]
    },
)
| mit |
huntxu/neutron | neutron/db/port_security/models.py | 5 | 2023 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import models_v2
class PortSecurityBinding(model_base.BASEV2):
    """Per-port port-security flag, bound 1:1 to a port."""

    # Primary key doubles as the foreign key: a binding lives and dies
    # with its port (ON DELETE CASCADE).
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be to able to
    # instruct SQLAlchemy to eagerly load port security binding
    port = orm.relationship(
        models_v2.Port, load_on_pending=True,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))
    # Bump the parent port's revision number whenever this row changes.
    revises_on_change = ('port',)
class NetworkSecurityBinding(model_base.BASEV2):
    """Network-wide default for the port-security flag of new ports."""

    # Primary key doubles as the foreign key: a binding lives and dies
    # with its network (ON DELETE CASCADE).
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be able to instruct
    # SQLAlchemy to eagerly load default port security setting for ports
    # on this network
    network = orm.relationship(
        models_v2.Network, load_on_pending=True,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))
    # Bump the parent network's revision number whenever this row changes.
    revises_on_change = ('network',)
| apache-2.0 |
TheLoneRanger14/Decaf.v2 | scripts/dump-guest-memory.py | 20 | 14302 | # This python script adds a new gdb command, "dump-guest-memory". It
# should be loaded with "source dump-guest-memory.py" at the (gdb)
# prompt.
#
# Copyright (C) 2013, Red Hat, Inc.
#
# Authors:
# Laszlo Ersek <lersek@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later. See
# the COPYING file in the top-level directory.
#
# The leading docstring doesn't have idiomatic Python formatting. It is
# printed by gdb's "help" command (the first line is printed in the
# "help data" summary), and it should match how other help texts look in
# gdb.
import struct
class DumpGuestMemory(gdb.Command):
    """Extract guest vmcore from qemu process coredump.

    The sole argument is FILE, identifying the target file to write the
    guest vmcore to.

    This GDB command reimplements the dump-guest-memory QMP command in
    python, using the representation of guest memory as captured in the qemu
    coredump. The qemu process that has been dumped must have had the
    command line option "-machine dump-guest-core=on".

    For simplicity, the "paging", "begin" and "end" parameters of the QMP
    command are not supported -- no attempt is made to get the guest's
    internal paging structures (ie. paging=false is hard-wired), and guest
    memory is always fully dumped.

    Only x86_64 guests are supported.

    The CORE/NT_PRSTATUS and QEMU notes (that is, the VCPUs' statuses) are
    not written to the vmcore. Preparing these would require context that is
    only present in the KVM host kernel module when the guest is alive. A
    fake ELF note is written instead, only to keep the ELF parser of "crash"
    happy.

    Dependent on how busted the qemu process was at the time of the
    coredump, this command might produce unpredictable results. If qemu
    deliberately called abort(), or it was dumped in response to a signal at
    a halfway fortunate point, then its coredump should be in reasonable
    shape and this command should mostly work."""

    TARGET_PAGE_SIZE = 0x1000
    TARGET_PAGE_MASK = 0xFFFFFFFFFFFFF000

    # Various ELF constants
    EM_X86_64 = 62  # AMD x86-64 target machine
    ELFDATA2LSB = 1  # little endian
    ELFCLASS64 = 2
    ELFMAG = "\x7FELF"
    EV_CURRENT = 1
    ET_CORE = 4
    PT_LOAD = 1
    PT_NOTE = 4

    # Special value for e_phnum. This indicates that the real number of
    # program headers is too large to fit into e_phnum. Instead the real
    # value is in the field sh_info of section 0.
    PN_XNUM = 0xFFFF

    # Format strings for packing and header size calculation.
    ELF64_EHDR = ("4s"  # e_ident/magic
                  "B"   # e_ident/class
                  "B"   # e_ident/data
                  "B"   # e_ident/version
                  "B"   # e_ident/osabi
                  "8s"  # e_ident/pad
                  "H"   # e_type
                  "H"   # e_machine
                  "I"   # e_version
                  "Q"   # e_entry
                  "Q"   # e_phoff
                  "Q"   # e_shoff
                  "I"   # e_flags
                  "H"   # e_ehsize
                  "H"   # e_phentsize
                  "H"   # e_phnum
                  "H"   # e_shentsize
                  "H"   # e_shnum
                  "H"   # e_shstrndx
                  )
    ELF64_PHDR = ("I"  # p_type
                  "I"  # p_flags
                  "Q"  # p_offset
                  "Q"  # p_vaddr
                  "Q"  # p_paddr
                  "Q"  # p_filesz
                  "Q"  # p_memsz
                  "Q"  # p_align
                  )

    def __init__(self):
        # Register the command under the name "dump-guest-memory", with
        # filename completion for its single argument.
        super(DumpGuestMemory, self).__init__("dump-guest-memory",
                                              gdb.COMMAND_DATA,
                                              gdb.COMPLETE_FILENAME)
        self.uintptr_t = gdb.lookup_type("uintptr_t")
        # Pre-compiled little-endian ELF64 header/program-header packers.
        self.elf64_ehdr_le = struct.Struct("<%s" % self.ELF64_EHDR)
        self.elf64_phdr_le = struct.Struct("<%s" % self.ELF64_PHDR)

    def int128_get64(self, val):
        # Collapse qemu's Int128 to a plain 64-bit value; the high word
        # must be unused.
        assert (val["hi"] == 0)
        return val["lo"]

    def qlist_foreach(self, head, field_str):
        # Generator walking a QEMU QLIST whose link field is *field_str*.
        var_p = head["lh_first"]
        while (var_p != 0):
            var = var_p.dereference()
            yield var
            var_p = var[field_str]["le_next"]

    def qemu_get_ram_block(self, ram_addr):
        # Find the RAMBlock covering *ram_addr* in qemu's global ram_list.
        ram_blocks = gdb.parse_and_eval("ram_list.blocks")
        for block in self.qlist_foreach(ram_blocks, "next"):
            if (ram_addr - block["offset"] < block["length"]):
                return block
        raise gdb.GdbError("Bad ram offset %x" % ram_addr)

    def qemu_get_ram_ptr(self, ram_addr):
        # Translate a ram_addr_t into a host virtual address.
        block = self.qemu_get_ram_block(ram_addr)
        return block["host"] + (ram_addr - block["offset"])

    def memory_region_get_ram_ptr(self, mr):
        # Follow alias memory regions down to the real backing RAM.
        if (mr["alias"] != 0):
            return (self.memory_region_get_ram_ptr(mr["alias"].dereference()) +
                    mr["alias_offset"])
        return self.qemu_get_ram_ptr(mr["ram_addr"] & self.TARGET_PAGE_MASK)

    def guest_phys_blocks_init(self):
        self.guest_phys_blocks = []

    def guest_phys_blocks_append(self):
        # Walk the flattened view of address_space_memory and collect the
        # guest-physical RAM ranges, merging ranges that are contiguous in
        # both guest-physical and host-virtual space.
        print "guest RAM blocks:"
        print ("target_start target_end host_addr message "
               "count")
        print ("---------------- ---------------- ---------------- ------- "
               "-----")

        current_map_p = gdb.parse_and_eval("address_space_memory.current_map")
        current_map = current_map_p.dereference()
        for cur in range(current_map["nr"]):
            flat_range = (current_map["ranges"] + cur).dereference()
            mr = flat_range["mr"].dereference()

            # we only care about RAM
            if (not mr["ram"]):
                continue

            section_size = self.int128_get64(flat_range["addr"]["size"])
            target_start = self.int128_get64(flat_range["addr"]["start"])
            target_end = target_start + section_size
            host_addr = (self.memory_region_get_ram_ptr(mr) +
                         flat_range["offset_in_region"])
            predecessor = None

            # find continuity in guest physical address space
            if (len(self.guest_phys_blocks) > 0):
                predecessor = self.guest_phys_blocks[-1]
                predecessor_size = (predecessor["target_end"] -
                                    predecessor["target_start"])

                # the memory API guarantees monotonically increasing
                # traversal
                assert (predecessor["target_end"] <= target_start)

                # we want continuity in both guest-physical and
                # host-virtual memory
                if (predecessor["target_end"] < target_start or
                    predecessor["host_addr"] + predecessor_size != host_addr):
                    predecessor = None

            if (predecessor is None):
                # isolated mapping, add it to the list
                self.guest_phys_blocks.append({"target_start": target_start,
                                               "target_end"  : target_end,
                                               "host_addr"   : host_addr})
                message = "added"
            else:
                # expand predecessor until @target_end; predecessor's
                # start doesn't change
                predecessor["target_end"] = target_end
                message = "joined"

            print ("%016x %016x %016x %-7s %5u" %
                   (target_start, target_end, host_addr.cast(self.uintptr_t),
                    message, len(self.guest_phys_blocks)))

    def cpu_get_dump_info(self):
        # We can't synchronize the registers with KVM post-mortem, and
        # the bits in (first_x86_cpu->env.hflags) seem to be stale; they
        # may not reflect long mode for example. Hence just assume the
        # most common values. This also means that instruction pointer
        # etc. will be bogus in the dump, but at least the RAM contents
        # should be valid.
        self.dump_info = {"d_machine": self.EM_X86_64,
                          "d_endian" : self.ELFDATA2LSB,
                          "d_class"  : self.ELFCLASS64}

    def encode_elf64_ehdr_le(self):
        # Pack the ELF64 file header (little endian).
        return self.elf64_ehdr_le.pack(
                                 self.ELFMAG,                 # e_ident/magic
                                 self.dump_info["d_class"],   # e_ident/class
                                 self.dump_info["d_endian"],  # e_ident/data
                                 self.EV_CURRENT,             # e_ident/version
                                 0,                           # e_ident/osabi
                                 "",                          # e_ident/pad
                                 self.ET_CORE,                # e_type
                                 self.dump_info["d_machine"], # e_machine
                                 self.EV_CURRENT,             # e_version
                                 0,                           # e_entry
                                 self.elf64_ehdr_le.size,     # e_phoff
                                 0,                           # e_shoff
                                 0,                           # e_flags
                                 self.elf64_ehdr_le.size,     # e_ehsize
                                 self.elf64_phdr_le.size,     # e_phentsize
                                 self.phdr_num,               # e_phnum
                                 0,                           # e_shentsize
                                 0,                           # e_shnum
                                 0                            # e_shstrndx
                                )

    def encode_elf64_note_le(self):
        # PT_NOTE program header; the note itself sits immediately before
        # the memory dump (see the layout in dump_init).
        return self.elf64_phdr_le.pack(self.PT_NOTE,         # p_type
                                       0,                    # p_flags
                                       (self.memory_offset -
                                        len(self.note)),     # p_offset
                                       0,                    # p_vaddr
                                       0,                    # p_paddr
                                       len(self.note),       # p_filesz
                                       len(self.note),       # p_memsz
                                       0                     # p_align
                                      )

    def encode_elf64_load_le(self, offset, start_hwaddr, range_size):
        # PT_LOAD program header for one guest-physical RAM range.
        return self.elf64_phdr_le.pack(self.PT_LOAD, # p_type
                                       0,            # p_flags
                                       offset,       # p_offset
                                       0,            # p_vaddr
                                       start_hwaddr, # p_paddr
                                       range_size,   # p_filesz
                                       range_size,   # p_memsz
                                       0             # p_align
                                      )

    def note_init(self, name, desc, type):
        # Build one ELF note with 4-byte-aligned name and desc fields.
        # name must include a trailing NUL
        namesz = (len(name) + 1 + 3) / 4 * 4
        descsz = (len(desc) + 3) / 4 * 4
        fmt = ("<"   # little endian
               "I"   # n_namesz
               "I"   # n_descsz
               "I"   # n_type
               "%us" # name
               "%us" # desc
               % (namesz, descsz))
        self.note = struct.pack(fmt,
                                len(name) + 1, len(desc), type, name, desc)

    def dump_init(self):
        # Gather RAM ranges and precompute the vmcore layout.
        self.guest_phys_blocks_init()
        self.guest_phys_blocks_append()
        self.cpu_get_dump_info()
        # we have no way to retrieve the VCPU status from KVM
        # post-mortem
        self.note_init("NONE", "EMPTY", 0)

        # Account for PT_NOTE.
        self.phdr_num = 1

        # We should never reach PN_XNUM for paging=false dumps: there's
        # just a handful of discontiguous ranges after merging.
        self.phdr_num += len(self.guest_phys_blocks)
        assert (self.phdr_num < self.PN_XNUM)

        # Calculate the ELF file offset where the memory dump commences:
        #
        #   ELF header
        #   PT_NOTE
        #   PT_LOAD: 1
        #   PT_LOAD: 2
        #   ...
        #   PT_LOAD: len(self.guest_phys_blocks)
        #   ELF note
        #   memory dump
        self.memory_offset = (self.elf64_ehdr_le.size +
                              self.elf64_phdr_le.size * self.phdr_num +
                              len(self.note))

    def dump_begin(self, vmcore):
        # Emit all headers and the fake note; the payload follows in
        # dump_iterate in the same PT_LOAD order.
        vmcore.write(self.encode_elf64_ehdr_le())
        vmcore.write(self.encode_elf64_note_le())
        running = self.memory_offset
        for block in self.guest_phys_blocks:
            range_size = block["target_end"] - block["target_start"]
            vmcore.write(self.encode_elf64_load_le(running,
                                                   block["target_start"],
                                                   range_size))
            running += range_size
        vmcore.write(self.note)

    def dump_iterate(self, vmcore):
        # Copy guest RAM out of the inferior (the coredump) page by page.
        qemu_core = gdb.inferiors()[0]
        for block in self.guest_phys_blocks:
            cur = block["host_addr"]
            left = block["target_end"] - block["target_start"]
            print ("dumping range at %016x for length %016x" %
                   (cur.cast(self.uintptr_t), left))
            while (left > 0):
                chunk_size = min(self.TARGET_PAGE_SIZE, left)
                chunk = qemu_core.read_memory(cur, chunk_size)
                vmcore.write(chunk)
                cur += chunk_size
                left -= chunk_size

    def create_vmcore(self, filename):
        vmcore = open(filename, "wb")
        self.dump_begin(vmcore)
        self.dump_iterate(vmcore)
        vmcore.close()

    def invoke(self, args, from_tty):
        # gdb entry point for the command.
        # Unwittingly pressing the Enter key after the command should
        # not dump the same multi-gig coredump to the same file.
        self.dont_repeat()

        argv = gdb.string_to_argv(args)
        if (len(argv) != 1):
            raise gdb.GdbError("usage: dump-guest-memory FILE")

        self.dump_init()
        self.create_vmcore(argv[0])


# Instantiating the class registers the command with gdb.
DumpGuestMemory()
| gpl-2.0 |
drummonds/pySage50 | setup.py | 1 | 4088 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
import platform
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to py.test."""

    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Filled in from the command line via ``--pytest-args``.
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)

    def run_tests(self):
        # Imported lazily so the command class loads without pytest.
        import pytest
        sys.exit(pytest.main(self.pytest_args))
# Add the current directory to the module search path.
sys.path.append('.')

# # Constants
# Layout of the project tree used below by find_packages/metadata loading.
CODE_DIRECTORY = 'pysage50'
# DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
#DATA_DIRECTORY = 'gnucash_books'
PYTEST_FLAGS = ['--doctest-modules']

# define install_requires for specific Python versions
python_version_specific_requires = []
def read(filename):
    """Return the contents of a file, resolved relative to this module.

    :param filename: file path
    :type filename: :class:`str`
    :return: the file's content
    :rtype: :class:`str`
    """
    here = os.path.dirname(__file__)
    with open(os.path.join(here, filename)) as handle:
        return handle.read()
# Import metadata. Normally this would just be:
#
#     from luca import metadata
#
# However, when we do this, we also import `luca/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
# NOTE(review): the ``imp`` module is deprecated and removed in Python
# 3.12 -- confirm the supported interpreter range before upgrading.
metadata = imp.load_source(
    'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))

# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
# if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
#     python_version_specific_requires.append('argparse')

# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
    name=metadata.package,
    version=metadata.version,
    author=metadata.authors[0],
    author_email=metadata.emails[0],
    maintainer=metadata.authors[0],
    maintainer_email=metadata.emails[0],
    url=metadata.url,
    description=metadata.description,
    long_description=read('README.md'),
    keywords=['Sage', 'python', 'binding', 'interface', ],
    license='MIT',
    platforms='any',
    # Find a list of classifiers here:
    # <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
    classifiers=[
        'Development Status :: 3 - pre Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Financial and Insurance Industry',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
        'Topic :: Office/Business',
        'Topic :: Office/Business :: Financial',
        'Topic :: Office/Business :: Financial :: Accounting',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages=find_packages(exclude=(TESTS_DIRECTORY, )),
    install_requires=[
        # 'sqlite3',
        # 'pandas',
    ] + python_version_specific_requires,
    # Allow tests to be run with `python setup.py test'.
    tests_require=[
        'pytest',
        'py',
    ],
    # console=['scripts/piecash_ledger.py','scripts/piecash_toqif.py'],
    scripts=[],
    cmdclass = {'test': PyTest},
    test_suite="tests",
    zip_safe=False,  # don't use eggs
)
def main():
    # Thin entry point so the module can be imported without running setup.
    setup(**setup_dict)


if __name__ == '__main__':
    main()
| mit |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/string.py | 78 | 10444 | """A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
# Both letter cases are accepted for hex digits.
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
# Everything ASCII that is considered printable, including whitespace.
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """capwords(s [,sep]) -> string

    Split the argument into words using split, capitalize each
    word using capitalize, and join the capitalized words using
    join.  If the optional second argument sep is absent or None,
    runs of whitespace characters are replaced by a single space
    and leading and trailing whitespace are removed, otherwise
    sep is used to split and join the words.

    """
    words = [word.capitalize() for word in s.split(sep)]
    return (sep or ' ').join(words)
####################################################################
import re as _re
from collections import ChainMap
class _TemplateMetaclass(type):
    # Default placeholder grammar; the %(...)s slots are filled in from the
    # Template subclass's ``delimiter`` and ``idpattern`` attributes.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) |   # Escape sequence of two delimiters
      (?P<named>%(id)s)      |   # delimiter and a Python identifier
      {(?P<braced>%(id)s)}   |   # delimiter and a braced identifier
      (?P<invalid>)              # Other ill-formed delimiter exprs
    )
    """

    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        # A subclass that defines its own ``pattern`` is taken verbatim;
        # otherwise one is built from its delimiter/idpattern and compiled
        # once per class.
        if 'pattern' in dct:
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
class Template(metaclass=_TemplateMetaclass):
    """A string class for supporting $-substitutions."""

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'
    flags = _re.IGNORECASE

    def __init__(self, template):
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Report a lone/ill-formed delimiter with a 1-based line/column
        # computed from the offset of the 'invalid' group.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(keepends=True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(self, *args, **kws):
        # Strict substitution: unknown placeholders raise KeyError and
        # ill-formed delimiters raise ValueError. Keyword args shadow the
        # optional mapping positional argument.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = ChainMap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(self, *args, **kws):
        # Lenient variant: unknown or ill-formed placeholders are left
        # in the output verbatim instead of raising.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = ChainMap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    return mo.group()
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return mo.group()
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
    """Pure-Python formatting engine mirroring str.format() (PEP 3101).

    Field parsing is delegated to the C-level _string module (imported
    elsewhere in this file); subclasses override the small hook methods
    (get_value, check_unused_args, format_field, convert_field, parse,
    get_field) to customize lookup and conversion.
    """

    def format(self, format_string, *args, **kwargs):
        """Format *format_string* using positional and keyword arguments."""
        return self.vformat(format_string, args, kwargs)

    def vformat(self, format_string, args, kwargs):
        """As format(), but takes the arguments as a tuple and a dict.

        Tracks which arguments were consumed and reports them to
        check_unused_args() before returning the result.
        """
        used_args = set()
        # Recursion depth 2 permits one level of nested format specs,
        # e.g. '{0:{1}}'.
        result = self._vformat(format_string, args, kwargs, used_args, 2)
        self.check_unused_args(used_args, args, kwargs)
        return result

    def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
                 auto_arg_index=0):
        # Core worker: walks parse() output, resolving each replacement
        # field.  auto_arg_index counts implicit '{}' fields; it is set to
        # False once an explicit numeric field is seen, so mixing the two
        # styles raises.
        if recursion_depth < 0:
            raise ValueError('Max string recursion exceeded')
        result = []
        for literal_text, field_name, format_spec, conversion in \
                self.parse(format_string):
            # output the literal text
            if literal_text:
                result.append(literal_text)
            # if there's a field, output it
            if field_name is not None:
                # this is some markup, find the object and do
                # the formatting
                # handle arg indexing when empty field_names are given.
                if field_name == '':
                    if auto_arg_index is False:
                        raise ValueError('cannot switch from manual field '
                                         'specification to automatic field '
                                         'numbering')
                    field_name = str(auto_arg_index)
                    auto_arg_index += 1
                elif field_name.isdigit():
                    if auto_arg_index:
                        raise ValueError('cannot switch from manual field '
                                         'specification to automatic field '
                                         'numbering')
                    # disable auto arg incrementing, if it gets
                    # used later on, then an exception will be raised
                    auto_arg_index = False
                # given the field_name, find the object it references
                # and the argument it came from
                obj, arg_used = self.get_field(field_name, args, kwargs)
                used_args.add(arg_used)
                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion)
                # expand the format spec, if needed
                format_spec = self._vformat(format_spec, args, kwargs,
                                            used_args, recursion_depth-1,
                                            auto_arg_index=auto_arg_index)
                # format the object and append to the result
                result.append(self.format_field(obj, format_spec))
        return ''.join(result)

    def get_value(self, key, args, kwargs):
        """Resolve the first component of a field name: an int indexes
        *args*, a str keys *kwargs*."""
        if isinstance(key, int):
            return args[key]
        else:
            return kwargs[key]

    def check_unused_args(self, used_args, args, kwargs):
        """Hook for subclasses to complain about unconsumed arguments;
        the default is a no-op."""
        pass

    def format_field(self, value, format_spec):
        """Apply *format_spec* to *value* via the built-in format()."""
        return format(value, format_spec)

    def convert_field(self, value, conversion):
        """Apply the '!s'/'!r'/'!a' conversion (or None for no-op)."""
        # do any conversion on the resulting object
        if conversion is None:
            return value
        elif conversion == 's':
            return str(value)
        elif conversion == 'r':
            return repr(value)
        elif conversion == 'a':
            return ascii(value)
        raise ValueError("Unknown conversion specifier {0!s}".format(conversion))

    # returns an iterable that contains tuples of the form:
    # (literal_text, field_name, format_spec, conversion)
    # literal_text can be zero length
    # field_name can be None, in which case there's no
    #  object to format and output
    # if field_name is not None, it is looked up, formatted
    #  with format_spec and conversion and then used
    def parse(self, format_string):
        return _string.formatter_parser(format_string)

    # given a field_name, find the object it references.
    #  field_name:   the field being looked up, e.g. "0.name"
    #                 or "lookup[3]"
    #  used_args:    a set of which args have been used
    #  args, kwargs: as passed in to vformat
    def get_field(self, field_name, args, kwargs):
        first, rest = _string.formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)
        # loop through the rest of the field_name, doing
        #  getattr or getitem as needed
        for is_attr, i in rest:
            if is_attr:
                obj = getattr(obj, i)
            else:
                obj = obj[i]
        # Return the resolved object plus the first-level key so the caller
        # can record which argument was consumed.
        return obj, first
| lgpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.