gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/Users/alicetopping/dev/nyu-python/course3/assignments/flaskr/my_project/bin/python
import datetime
import sys
from getpass import getpass
from optparse import OptionParser
from peewee import *
from peewee import print_
from peewee import __version__ as peewee_version
from playhouse.reflection import *
# Module template for generated code: peewee imports, the database instance,
# and a BaseModel that every generated model inherits from.
# NOTE(review): indentation inside this template string appears to have been
# collapsed by whitespace mangling; verify against upstream pwiz before using
# the generated output.
TEMPLATE = """from peewee import *%s
database = %s('%s', **%s)
class UnknownField(object):
def __init__(self, *_, **__): pass
class BaseModel(Model):
class Meta:
database = database
"""
# Supported peewee database classes and the --engine aliases that select them.
DATABASE_ALIASES = {
MySQLDatabase: ['mysql', 'mysqldb'],
PostgresqlDatabase: ['postgres', 'postgresql'],
SqliteDatabase: ['sqlite', 'sqlite3'],
}
# Inverted mapping: engine alias string -> database class.
DATABASE_MAP = dict((value, key)
for key in DATABASE_ALIASES
for value in DATABASE_ALIASES[key])
def make_introspector(database_type, database_name, **kwargs):
    """Create an Introspector for the given engine alias and database name.

    Prints an error and exits the process when the engine alias is unknown.
    Remaining keyword arguments (minus 'schema') are passed to the database
    class constructor.
    """
    if database_type not in DATABASE_MAP:
        err('Unrecognized database, must be one of: %s' %
            ', '.join(DATABASE_MAP.keys()))
        sys.exit(1)
    # 'schema' belongs to the introspector, not the database constructor.
    schema = kwargs.pop('schema', None)
    db = DATABASE_MAP[database_type](database_name, **kwargs)
    return Introspector.from_database(db, schema=schema)
def print_models(introspector, tables=None, preserve_order=False):
    """Introspect the database and print peewee model definitions.

    Tables referenced through foreign keys are printed before the tables
    that reference them; reference cycles are flagged with a comment.
    """
    database = introspector.introspect(table_names=tables)
    print_(TEMPLATE % (
        introspector.get_additional_imports(),
        introspector.get_database_class().__name__,
        introspector.get_database_name(),
        repr(introspector.get_database_kwargs())))

    def _print_table(table, seen, accum=None):
        # accum is the chain of tables currently being printed, used to
        # detect reference cycles during the depth-first traversal.
        accum = accum or []
        for foreign_key in database.foreign_keys[table]:
            dest = foreign_key.dest_table
            # In the event the destination table has already been pushed
            # for printing, then we have a reference cycle.
            if dest in accum and table not in accum:
                print_('# Possible reference cycle: %s' % dest)
            # If this is not a self-referential foreign key, and we have
            # not already processed the destination table, do so now.
            if dest not in seen and dest not in accum:
                seen.add(dest)
                if dest != table:
                    _print_table(dest, seen, accum + [table])
        print_('class %s(BaseModel):' % database.model_names[table])
        columns = database.columns[table].items()
        if not preserve_order:
            columns = sorted(columns)
        primary_keys = database.primary_keys[table]
        for name, column in columns:
            # Skip a conventional single auto-incrementing 'id' primary key;
            # peewee adds it implicitly.
            if (name in primary_keys and
                    name == 'id' and
                    len(primary_keys) == 1 and
                    column.field_class in introspector.pk_classes):
                continue
            if column.primary_key and len(primary_keys) > 1:
                # If we have a CompositeKey, then we do not want to
                # explicitly mark the columns as being primary keys.
                column.primary_key = False
            # NOTE(review): indentation inside the emitted strings below looks
            # collapsed by whitespace mangling; verify against upstream pwiz.
            print_(' %s' % column.get_field())
        print_('')
        print_(' class Meta:')
        print_(' db_table = \'%s\'' % table)
        multi_column_indexes = database.multi_column_indexes(table)
        if multi_column_indexes:
            print_(' indexes = (')
            for fields, unique in sorted(multi_column_indexes):
                print_(' ((%s), %s),' % (
                    ', '.join("'%s'" % field for field in fields),
                    unique,
                ))
            print_(' )')
        if introspector.schema:
            print_(' schema = \'%s\'' % introspector.schema)
        if len(primary_keys) > 1:
            # Composite primary key: emitted via Meta rather than per-column.
            pk_field_names = sorted([
                field.name for col, field in columns
                if col in primary_keys])
            pk_list = ', '.join("'%s'" % pk for pk in pk_field_names)
            print_(' primary_key = CompositeKey(%s)' % pk_list)
        elif not primary_keys:
            print_(' primary_key = False')
        print_('')
        seen.add(table)

    seen = set()
    for table in sorted(database.model_names.keys()):
        if table not in seen:
            if not tables or table in tables:
                _print_table(table, seen)
def print_header(cmd_line, introspector):
    """Emit a comment header recording how and when the models were generated."""
    now = datetime.datetime.now()
    print_('# Code generated by:')
    print_('# python -m pwiz %s' % cmd_line)
    print_('# Date: %s' % now.strftime('%B %d, %Y %I:%M%p'))
    print_('# Database: %s' % introspector.get_database_name())
    print_('# Peewee version: %s' % peewee_version)
    print_('')
def err(msg):
    """Write msg to stderr in red (ANSI escape codes) and flush immediately."""
    colored = '\033[91m%s\033[0m\n' % msg
    sys.stderr.write(colored)
    sys.stderr.flush()
def get_option_parser():
    """Build the OptionParser describing the pwiz command-line interface."""
    parser = OptionParser(usage='usage: %prog [options] database_name')
    add = parser.add_option
    add('-H', '--host', dest='host')
    add('-p', '--port', dest='port', type='int')
    add('-u', '--user', dest='user')
    # -P is a flag only; the actual password is prompted for interactively.
    add('-P', '--password', dest='password', action='store_true')
    add('-e', '--engine', dest='engine', default='postgresql',
        choices=sorted(DATABASE_MAP),
        help=('Database type, e.g. sqlite, mysql or postgresql. Default '
              'is "postgresql".'))
    add('-s', '--schema', dest='schema')
    add('-t', '--tables', dest='tables',
        help=('Only generate the specified tables. Multiple table names should '
              'be separated by commas.'))
    add('-i', '--info', dest='info', action='store_true',
        help=('Add database information and other metadata to top of the '
              'generated file.'))
    add('-o', '--preserve-order', action='store_true', dest='preserve_order',
        help='Model definition column ordering matches source table.')
    return parser
def get_connect_kwargs(options):
    """Collect truthy connection settings from parsed options into a dict.

    Prompts for a password (without echo) only when the -P flag was given.
    """
    kwargs = {}
    for name in ('host', 'port', 'user', 'schema'):
        value = getattr(options, name)
        if value:
            kwargs[name] = value
    if options.password:
        kwargs['password'] = getpass()
    return kwargs
if __name__ == '__main__':
    raw_argv = sys.argv
    parser = get_option_parser()
    options, args = parser.parse_args()

    if options.preserve_order:
        # Preserving column order relies on ordered-dict introspection.
        try:
            from collections import OrderedDict
        except ImportError:
            err('Preserve order requires Python >= 2.7.')
            sys.exit(1)

    if len(args) < 1:
        err('Missing required parameter "database"')
        parser.print_help()
        sys.exit(1)

    connect = get_connect_kwargs(options)
    database = args[-1]

    # Optional comma-separated whitelist of tables to generate.
    tables = None
    if options.tables:
        tables = [table.strip() for table in options.tables.split(',')
                  if table.strip()]

    introspector = make_introspector(options.engine, database, **connect)
    if options.info:
        cmd_line = ' '.join(raw_argv[1:])
        print_header(cmd_line, introspector)

    print_models(introspector, tables, preserve_order=options.preserve_order)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script goes over multiple pages of the home wiki, and reports invalid
ISBN numbers.
Additionally, it can convert all ISBN-10 codes to the ISBN-13 format, and
correct the ISBN format by placing hyphens.
These command line parameters can be used to specify which pages to work on:
&amp;params;
-namespace:n Number or name of namespace to process. The parameter can be
used multiple times. It works in combination with all other
parameters, except for the -start parameter. If you e.g.
want to iterate over all categories starting at M, use
-start:Category:M.
Furthermore, the following command line parameters are supported:
-to13 Converts all ISBN-10 codes to ISBN-13.
NOTE: This needn't be done, as MediaWiki still supports
(and will keep supporting) ISBN-10, and all libraries and
bookstores will most likely do so as well.
-format Corrects the hyphenation.
NOTE: This is in here for testing purposes only. Usually
it's not worth to create an edit for such a minor issue.
The recommended way of doing this is enabling
cosmetic_changes, so that these changes are made on-the-fly
to all pages that are modified.
-always Don't prompt you for each replacement.
"""
__version__ = '$Id$'

import re
import sys

import pywikibot
from pywikibot import pagegenerators, i18n

# Substitutions applied when showing this script's help text: the '&params;'
# placeholder in the module docstring expands to the standard page-generator
# option help. (The key was previously mojibake '\xb6ms;' caused by the HTML
# entity '&para' being decoded inside '&params;'.)
docuReplacements = {
    '&params;': pagegenerators.parameterHelp,
}
# Maps each group number to the list of its publisher number ranges.
# Taken from http://www.isbn-international.org/converter/ranges.htm
ranges = {
'0': [ # English speaking area
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('900000', '949999'),
('9500000', '9999999'),
],
'1': [ # English speaking area
('00', '09'),
('100', '399'),
('4000', '5499'),
('55000', '86979'),
('869800', '998999'),
],
'2': [ # French speaking area
('00', '19'),
('200', '349'),
('35000', '39999'),
('400', '699'),
('7000', '8399'),
('84000', '89999'),
('900000', '949999'),
('9500000', '9999999'),
],
'3': [ # German speaking area
('00', '02'),
('030', '033'),
('0340', '0369'),
('03700', '03999'),
('04', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('900000', '949999'),
('9500000', '9999999'),
],
'4': [ # Japan
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('900000', '949999'),
('9500000', '9999999'),
],
'5': [ # Russian Federation
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('900000', '909999'),
('91000', '91999'),
('9200', '9299'),
('93000', '94999'),
('9500', '9799'),
('98000', '98999'),
('9900000', '9909999'),
('9910', '9999'),
],
'600': [ # Iran
('00', '09'),
('100', '499'),
('5000', '8999'),
('90000', '99999'),
],
'601': [ # Kazakhstan
('00', '19'),
('200', '699'),
('7000', '7999'),
('80000', '84999'),
('85', '99'),
],
'602': [ # Indonesia
('00', '19'),
('200', '799'),
('8000', '9499'),
('95000', '99999'),
],
'603': [ # Saudi Arabia
('00', '04'),
('500', '799'),
('8000', '8999'),
('90000', '99999'),
],
'604': [ # Vietnam
('0', '4'),
('50', '89'),
('900', '979'),
('9800', '9999'),
],
'605': [ # Turkey
('00', '09'),
('100', '399'),
('4000', '5999'),
('60000', '89999'),
],
'7': [ # China, People's Republic
('00', '09'),
('100', '499'),
('5000', '7999'),
('80000', '89999'),
('900000', '999999'),
],
'80': [ # Czech Republic; Slovakia
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('900000', '999999'),
],
'81': [ # India
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('900000', '999999'),
],
'82': [ # Norway
('00', '19'),
('200', '699'),
('7000', '8999'),
('90000', '98999'),
('990000', '999999'),
],
'83': [ # Poland
('00', '19'),
('200', '599'),
('60000', '69999'),
('7000', '8499'),
('85000', '89999'),
('900000', '999999'),
],
'84': [ # Spain
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('9000', '9199'),
('920000', '923999'),
('92400', '92999'),
('930000', '949999'),
('95000', '96999'),
('9700', '9999'),
],
'85': [ # Brazil
('00', '19'),
('200', '599'),
('60000', '69999'),
('7000', '8499'),
('85000', '89999'),
('900000', '979999'),
('98000', '99999'),
],
'86': [ # Serbia and Montenegro
('00', '29'),
('300', '599'),
('6000', '7999'),
('80000', '89999'),
('900000', '999999'),
],
'87': [ # Denmark
('00', '29'),
('400', '649'),
('7000', '7999'),
('85000', '94999'),
('970000', '999999'),
],
'88': [ # Italian speaking area
('00', '19'),
('200', '599'),
('6000', '8499'),
('85000', '89999'),
('900000', '949999'),
('95000', '99999'),
],
'89': [ # Korea
('00', '24'),
('250', '549'),
('5500', '8499'),
('85000', '94999'),
('950000', '999999'),
],
'90': [ # Netherlands, Belgium (Flemish)
('00', '19'),
('200', '499'),
('5000', '6999'),
('70000', '79999'),
('800000', '849999'),
('8500', '8999'),
('900000', '909999'),
('940000', '949999'),
],
'91': [ # Sweden
('0', '1'),
('20', '49'),
('500', '649'),
('7000', '7999'),
('85000', '94999'),
('970000', '999999'),
],
'92': [ # International Publishers (Unesco, EU), European Community Organizations
('0', '5'),
('60', '79'),
('800', '899'),
('9000', '9499'),
('95000', '98999'),
('990000', '999999'),
],
'93': [ # India - no ranges fixed yet
],
'950': [ # Argentina
('00', '49'),
('500', '899'),
('9000', '9899'),
('99000', '99999'),
],
'951': [ # Finland
('0', '1'),
('20', '54'),
('550', '889'),
('8900', '9499'),
('95000', '99999'),
],
'952': [ # Finland
('00', '19'),
('200', '499'),
('5000', '5999'),
('60', '65'),
('6600', '6699'),
('67000', '69999'),
('7000', '7999'),
('80', '94'),
('9500', '9899'),
('99000', '99999'),
],
'953': [ # Croatia
('0', '0'),
('10', '14'),
('150', '549'),
('55000', '59999'),
('6000', '9499'),
('95000', '99999'),
],
'954': [ # Bulgaria
('00', '29'),
('300', '799'),
('8000', '8999'),
('90000', '92999'),
('9300', '9999'),
],
'955': [ # Sri Lanka
('0', '0'),
('1000', '1999'),
('20', '54'),
('550', '799'),
('8000', '9499'),
('95000', '99999'),
],
'956': [ # Chile
('00', '19'),
('200', '699'),
('7000', '9999'),
],
'957': [ # Taiwan, China
('00', '02'),
('0300', '0499'),
('05', '19'),
('2000', '2099'),
('21', '27'),
('28000', '30999'),
('31', '43'),
('440', '819'),
('8200', '9699'),
('97000', '99999'),
],
'958': [ # Colombia
('00', '59'),
('600', '799'),
('8000', '9499'),
('95000', '99999'),
],
'959': [ # Cuba
('00', '19'),
('200', '699'),
('7000', '8499'),
],
'960': [ # Greece
('00', '19'),
('200', '659'),
('6600', '6899'),
('690', '699'),
('7000', '8499'),
('85000', '99999'),
],
'961': [ # Slovenia
('00', '19'),
('200', '599'),
('6000', '8999'),
('90000', '94999'),
],
'962': [ # Hong Kong
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '86999'),
('8700', '8999'),
('900', '999'),
],
'963': [ # Hungary
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('9000', '9999'),
],
'964': [ # Iran
('00', '14'),
('150', '249'),
('2500', '2999'),
('300', '549'),
('5500', '8999'),
('90000', '96999'),
('970', '989'),
('9900', '9999'),
],
'965': [ # Israel
('00', '19'),
('200', '599'),
('7000', '7999'),
('90000', '99999'),
],
'966': [ # Ukraine
('00', '19'),
('2000', '2999'),
('300', '699'),
('7000', '8999'),
('90000', '99999'),
],
'967': [ # Malaysia
('00', '29'),
('300', '499'),
('5000', '5999'),
('60', '89'),
('900', '989'),
('9900', '9989'),
('99900', '99999'),
],
'968': [ # Mexico
('01', '39'),
('400', '499'),
('5000', '7999'),
('800', '899'),
('9000', '9999'),
],
'969': [ # Pakistan
('0', '1'),
('20', '39'),
('400', '799'),
('8000', '9999'),
],
'970': [ # Mexico
('01', '59'),
('600', '899'),
('9000', '9099'),
('91000', '96999'),
('9700', '9999'),
],
'971': [ # Philippines
('000', '019'),
('02', '02'),
('0300', '0599'),
('06', '09'),
('10', '49'),
('500', '849'),
('8500', '9099'),
('91000', '99999'),
],
'972': [ # Portugal
('0', '1'),
('20', '54'),
('550', '799'),
('8000', '9499'),
('95000', '99999'),
],
'973': [ # Romania
('0', '0'),
('100', '169'),
('1700', '1999'),
('20', '54'),
('550', '759'),
('7600', '8499'),
('85000', '88999'),
('8900', '9499'),
('95000', '99999'),
],
'974': [ # Thailand
('00', '19'),
('200', '699'),
('7000', '8499'),
('85000', '89999'),
('90000', '94999'),
('9500', '9999'),
],
'975': [ # Turkey
('00000', '00999'),
('01', '24'),
('250', '599'),
('6000', '9199'),
('92000', '98999'),
('990', '999'),
],
'976': [ # Caribbean Community
('0', '3'),
('40', '59'),
('600', '799'),
('8000', '9499'),
('95000', '99999'),
],
'977': [ # Egypt
('00', '19'),
('200', '499'),
('5000', '6999'),
('700', '999'),
],
'978': [ # Nigeria
('000', '199'),
('2000', '2999'),
('30000', '79999'),
('8000', '8999'),
('900', '999'),
],
'979': [ # Indonesia
('000', '099'),
('1000', '1499'),
('15000', '19999'),
('20', '29'),
('3000', '3999'),
('400', '799'),
('8000', '9499'),
('95000', '99999'),
],
'980': [ # Venezuela
('00', '19'),
('200', '599'),
('6000', '9999'),
],
'981': [ # Singapore
('00', '19'),
('200', '299'),
('3000', '9999'),
],
'982': [ # South Pacific
('00', '09'),
('100', '699'),
('70', '89'),
('9000', '9999'),
],
'983': [ # Malaysia
('00', '01'),
('020', '199'),
('2000', '3999'),
('40000', '44999'),
('45', '49'),
('50', '79'),
('800', '899'),
('9000', '9899'),
('99000', '99999'),
],
'984': [ # Bangladesh
('00', '39'),
('400', '799'),
('8000', '8999'),
('90000', '99999'),
],
'985': [ # Belarus
('00', '39'),
('400', '599'),
('6000', '8999'),
('90000', '99999'),
],
'986': [ # Taiwan, China
('00', '11'),
('120', '559'),
('5600', '7999'),
('80000', '99999'),
],
'987': [ # Argentina
('00', '09'),
('1000', '1999'),
('20000', '29999'),
('30', '49'),
('500', '899'),
('9000', '9499'),
('95000', '99999'),
],
'988': [ # Hong Kong
('00', '16'),
('17000', '19999'),
('200', '799'),
('8000', '9699'),
('97000', '99999'),
],
'989': [ # Portugal
('0', '1'),
('20', '54'),
('550', '799'),
('8000', '9499'),
('95000', '99999'),
],
'9937': [ # Nepal
('0', '2'),
('30', '49'),
('500', '799'),
('8000', '9999'),
],
'9938': [ # Tunisia
('00', '79'),
('800', '949'),
('9500', '9999'),
],
'9939': [ # Armenia
('0', '4'),
('50', '79'),
('800', '899'),
('9000', '9999'),
],
'9940': [ # Montenegro
('0', '1'),
('20', '49'),
('500', '899'),
('9000', '9999'),
],
'9941': [ # Georgia
('0', '0'),
('10', '39'),
('400', '899'),
('9000', '9999'),
],
'9942': [ # Ecuador
('00', '89'),
('900', '994'),
('9950', '9999'),
],
'9943': [ # Uzbekistan
('00', '29'),
('300', '399'),
('4000', '9999'),
],
'9944': [ # Turkey
('0', '2'),
('300', '499'),
('5000', '5999'),
('60', '89'),
('900', '999'),
],
'9945': [ # Dominican Republic
('00', '00'),
('010', '079'),
('08', '39'),
('400', '569'),
('57', '57'),
('580', '849'),
('8500', '9999'),
],
'9946': [ # Korea, P.D.R.
('0', '1'),
('20', '39'),
('400', '899'),
('9000', '9999'),
],
'9947': [ # Algeria
('0', '1'),
('20', '79'),
('800', '999'),
],
'9948': [ # United Arab Emirates
('00', '39'),
('400', '849'),
('8500', '9999'),
],
'9949': [ # Estonia
('0', '0'),
('10', '39'),
('400', '899'),
('9000', '9999'),
],
'9950': [ # Palestine
('00', '29'),
('300', '840'),
('8500', '9999'),
],
'9951': [ # Kosova
('00', '39'),
('400', '849'),
('8500', '9999'),
],
'9952': [ # Azerbaijan
('0', '1'),
('20', '39'),
('400', '799'),
('8000', '9999'),
],
'9953': [ # Lebanon
('0', '0'),
('10', '39'),
('400', '599'),
('60', '89'),
('9000', '9999'),
],
'9954': [ # Morocco
('0', '1'),
('20', '39'),
('400', '799'),
('8000', '9999'),
],
'9955': [ # Lithuania
('00', '39'),
('400', '929'),
('9300', '9999'),
],
'9956': [ # Cameroon
('0', '0'),
('10', '39'),
('400', '899'),
('9000', '9999'),
],
'9957': [ # Jordan
('00', '39'),
('400', '699'),
('70', '84'),
('8500', '9999'),
],
'9958': [ # Bosnia and Herzegovina
('0', '0'),
('10', '49'),
('500', '899'),
('9000', '9999'),
],
'9959': [ # Libya
('0', '1'),
('20', '79'),
('800', '949'),
('9500', '9999'),
],
'9960': [ # Saudi Arabia
('00', '59'),
('600', '899'),
('9000', '9999'),
],
'9961': [ # Algeria
('0', '2'),
('30', '69'),
('700', '949'),
('9500', '9999'),
],
'9962': [ # Panama
('00', '54'),
('5500', '5599'),
('56', '59'),
('600', '849'),
('8500', '9999'),
],
'9963': [ # Cyprus
('0', '2'),
('30', '54'),
('550', '749'),
('7500', '9999'),
],
'9964': [ # Ghana
('0', '6'),
('70', '94'),
('950', '999'),
],
'9965': [ # Kazakhstan
('00', '39'),
('400', '899'),
('9000', '9999'),
],
'9966': [ # Kenya
('00', '69'),
('7000', '7499'),
('750', '959'),
('9600', '9999'),
],
'9967': [ # Kyrgyzstan
('00', '39'),
('400', '899'),
('9000', '9999'),
],
'9968': [ # Costa Rica
('00', '49'),
('500', '939'),
('9400', '9999'),
],
'9970': [ # Uganda
('00', '39'),
('400', '899'),
('9000', '9999'),
],
'9971': [ # Singapore
('0', '5'),
('60', '89'),
('900', '989'),
('9900', '9999'),
],
'9972': [ # Peru
('00', '09'),
('1', '1'),
('200', '249'),
('2500', '2999'),
('30', '59'),
('600', '899'),
('9000', '9999'),
],
'9973': [ # Tunisia
('0', '05'),
('060', '089'),
('0900', '0999'),
('10', '69'),
('700', '969'),
('9700', '9999'),
],
'9974': [ # Uruguay
('0', '2'),
('30', '54'),
('550', '749'),
('7500', '9499'),
('95', '99'),
],
'9975': [ # Moldova
('0', '0'),
('100', '399'),
('4000', '4499'),
('45', '89'),
('900', '949'),
('9500', '9999'),
],
'9976': [ # Tanzania
('0', '5'),
('60', '89'),
('900', '989'),
('9990', '9999'),
],
'9977': [ # Costa Rica
('00', '89'),
('900', '989'),
('9900', '9999'),
],
'9978': [ # Ecuador
('00', '29'),
('300', '399'),
('40', '94'),
('950', '989'),
('9900', '9999'),
],
'9979': [ # Iceland
('0', '4'),
('50', '64'),
('650', '659'),
('66', '75'),
('760', '899'),
('9000', '9999'),
],
'9980': [ # Papua New Guinea
('0', '3'),
('40', '89'),
('900', '989'),
('9900', '9999'),
],
'9981': [ # Morocco
('00', '09'),
('100', '159'),
('1600', '1999'),
('20', '79'),
('800', '949'),
('9500', '9999'),
],
'9982': [ # Zambia
('00', '79'),
('800', '989'),
('9900', '9999'),
],
'9983': [ # Gambia
('80', '94'),
('950', '989'),
('9900', '9999'),
],
'9984': [ # Latvia
('00', '49'),
('500', '899'),
('9000', '9999'),
],
'9985': [ # Estonia
('0', '4'),
('50', '79'),
('800', '899'),
('9000', '9999'),
],
'9986': [ # Lithuania
('00', '39'),
('400', '899'),
('9000', '9399'),
('940', '969'),
('97', '99'),
],
'9987': [ # Tanzania
('00', '39'),
('400', '879'),
('8800', '9999'),
],
'9988': [ # Ghana
('0', '2'),
('30', '54'),
('550', '749'),
('7500', '9999'),
],
'9989': [ # Macedonia
('0', '0'),
('100', '199'),
('2000', '2999'),
('30', '59'),
('600', '949'),
('9500', '9999'),
],
'99901': [ # Bahrain
('00', '49'),
('500', '799'),
('80', '99'),
],
'99902': [ # Gabon - no ranges fixed yet
],
'99903': [ # Mauritius
('0', '1'),
('20', '89'),
('900', '999'),
],
'99904': [ # Netherlands Antilles; Aruba, Neth. Ant
('0', '5'),
('60', '89'),
('900', '999'),
],
'99905': [ # Bolivia
('0', '3'),
('40', '79'),
('800', '999'),
],
'99906': [ # Kuwait
('0', '2'),
('30', '59'),
('600', '699'),
('70', '89'),
('9', '9'),
],
'99908': [ # Malawi
('0', '0'),
('10', '89'),
('900', '999'),
],
'99909': [ # Malta
('0', '3'),
('40', '94'),
('950', '999'),
],
'99910': [ # Sierra Leone
('0', '2'),
('30', '89'),
('900', '999'),
],
'99911': [ # Lesotho
('00', '59'),
('600', '999'),
],
'99912': [ # Botswana
('0', '3'),
('400', '599'),
('60', '89'),
('900', '999'),
],
'99913': [ # Andorra
('0', '2'),
('30', '35'),
('600', '604'),
],
'99914': [ # Suriname
('0', '4'),
('50', '89'),
('900', '949'),
],
'99915': [ # Maldives
('0', '4'),
('50', '79'),
('800', '999'),
],
'99916': [ # Namibia
('0', '2'),
('30', '69'),
('700', '999'),
],
'99917': [ # Brunei Darussalam
('0', '2'),
('30', '89'),
('900', '999'),
],
'99918': [ # Faroe Islands
('0', '3'),
('40', '79'),
('800', '999'),
],
'99919': [ # Benin
('0', '2'),
('40', '69'),
('900', '999'),
],
'99920': [ # Andorra
('0', '4'),
('50', '89'),
('900', '999'),
],
'99921': [ # Qatar
('0', '1'),
('20', '69'),
('700', '799'),
('8', '8'),
('90', '99'),
],
'99922': [ # Guatemala
('0', '3'),
('40', '69'),
('700', '999'),
],
'99923': [ # El Salvador
('0', '1'),
('20', '79'),
('800', '999'),
],
'99924': [ # Nicaragua
('0', '2'),
('30', '79'),
('800', '999'),
],
'99925': [ # Paraguay
('0', '3'),
('40', '79'),
('800', '999'),
],
'99926': [ # Honduras
('0', '0'),
('10', '59'),
('600', '999'),
],
'99927': [ # Albania
('0', '2'),
('30', '59'),
('600', '999'),
],
'99928': [ # Georgia
('0', '0'),
('10', '79'),
('800', '999'),
],
'99929': [ # Mongolia
('0', '4'),
('50', '79'),
('800', '999'),
],
'99930': [ # Armenia
('0', '4'),
('50', '79'),
('800', '999'),
],
'99931': [ # Seychelles
('0', '4'),
('50', '79'),
('800', '999'),
],
'99932': [ # Malta
('0', '0'),
('10', '59'),
('600', '699'),
('7', '7'),
('80', '99'),
],
'99933': [ # Nepal
('0', '2'),
('30', '59'),
('600', '999'),
],
'99934': [ # Dominican Republic
('0', '1'),
('20', '79'),
('800', '999'),
],
'99935': [ # Haiti
('0', '2'),
('7', '8'),
('30', '59'),
('600', '699'),
('90', '99'),
],
'99936': [ # Bhutan
('0', '0'),
('10', '59'),
('600', '999'),
],
'99937': [ # Macau
('0', '1'),
('20', '59'),
('600', '999'),
],
'99938': [ # Srpska
('0', '1'),
('20', '59'),
('600', '899'),
('90', '99'),
],
'99939': [ # Guatemala
('0', '5'),
('60', '89'),
('900', '999'),
],
'99940': [ # Georgia
('0', '0'),
('10', '69'),
('700', '999'),
],
'99941': [ # Armenia
('0', '2'),
('30', '79'),
('800', '999'),
],
'99942': [ # Sudan
('0', '4'),
('50', '79'),
('800', '999'),
],
'99943': [ # Albania
('0', '2'),
('30', '59'),
('600', '999'),
],
'99944': [ # Ethiopia
('0', '4'),
('50', '79'),
('800', '999'),
],
'99945': [ # Namibia
('0', '5'),
('60', '89'),
('900', '999'),
],
'99946': [ # Nepal
('0', '2'),
('30', '59'),
('600', '999'),
],
'99947': [ # Tajikistan
('0', '2'),
('30', '69'),
('700', '999'),
],
'99948': [ # Eritrea
('0', '4'),
('50', '79'),
('800', '999'),
],
'99949': [ # Mauritius
('0', '1'),
('20', '89'),
('900', '999'),
],
'99950': [ # Cambodia
('0', '4'),
('50', '79'),
('800', '999'),
],
'99951': [ # Congo - no ranges fixed yet
],
'99952': [ # Mali
('0', '4'),
('50', '79'),
('800', '999'),
],
'99953': [ # Paraguay
('0', '2'),
('30', '79'),
('800', '999'),
],
'99954': [ # Bolivia
('0', '2'),
('30', '69'),
('700', '999'),
],
'99955': [ # Srpska
('0', '1'),
('20', '59'),
('600', '899'),
('90', '99'),
],
'99956': [ # Albania
('00', '59'),
('600', '999'),
],
}
class IsbnBot:
# NOTE(review): this class is shadowed by the second IsbnBot definition
# further down this module, so it is effectively dead code.
def __init__(self, generator):
# generator yields the wiki pages to work on.
self.generator = generator
def run(self):
# Fetch each page's text and save it back (conversion not implemented here).
for page in self.generator:
try:
# NOTE(review): self.touch_redirects is never assigned anywhere in this
# class, so this line would raise AttributeError if it ever executed.
text = page.get(get_redirect = self.touch_redirects)
# convert ISBN numbers
page.put(text)
except pywikibot.NoPage:
pywikibot.output(u"Page %s does not exist?!"
% page.title(asLink=True))
except pywikibot.IsRedirectPage:
pywikibot.output(u"Page %s is a redirect; skipping."
% page.title(asLink=True))
except pywikibot.LockedPage:
pywikibot.output(u"Page %s is locked?!"
% page.title(asLink=True))
class InvalidIsbnException(pywikibot.Error):
"""Invalid ISBN: bad length, invalid characters, wrong checksum, or unknown group/publisher range."""
class ISBN:
    """
    Abstract superclass for ISBN numbers.

    Subclasses provide self.code plus digits() and possiblePrefixes().
    """

    def format(self):
        """
        Put hyphens into this ISBN number.

        Produces prefix-group-publisher-item-checksum and stores the result
        back into self.code. Raises InvalidIsbnException when the group or
        publisher number cannot be found in the known `ranges` table.
        """
        rest = ''.join(str(digit) for digit in self.digits())
        result = ''
        # Determine the prefix (if any), e.g. 978/979 for ISBN-13.
        for prefix in self.possiblePrefixes():
            if rest.startswith(prefix):
                result += prefix + '-'
                rest = rest[len(prefix):]
                break
        # Determine the registration group.
        # (Plain iteration instead of Python-2-only dict.iterkeys().)
        for groupNumber in ranges:
            if rest.startswith(groupNumber):
                result += groupNumber + '-'
                rest = rest[len(groupNumber):]
                publisherRanges = ranges[groupNumber]
                break
        else:
            raise InvalidIsbnException('ISBN %s: group number unknown.' % self.code)
        # Determine the publisher from the group's range table.
        for (start, end) in publisherRanges:
            length = len(start)  # NOTE: start and end always have equal length
            # Bug fix: ranges are inclusive at both ends, so the lower bound
            # must use >= ('>' wrongly rejected the first value of each range,
            # e.g. publisher '00' in group '0').
            if start <= rest[:length] <= end:
                result += rest[:length] + '-'
                rest = rest[length:]
                break
        else:
            raise InvalidIsbnException('ISBN %s: publisher number unknown.' % self.code)
        # The rest is the item number and the 1-digit checksum.
        result += rest[:-1] + '-' + rest[-1]
        self.code = result
class ISBN13(ISBN):
    """An ISBN-13 number: 13 digits with a weighted mod-10 check digit."""

    def __init__(self, code, checksumMissing = False):
        self.code = code
        if checksumMissing:
            # Append the computed check digit to the 12 given digits.
            self.code += str(self.calculateChecksum())
        self.checkValidity()

    def possiblePrefixes(self):
        # GS1 prefixes valid for ISBN-13.
        return ['978', '979']

    def digits(self):
        """
        Returns a list of the digits in the ISBN code.

        Hyphens are skipped; any other character raises InvalidIsbnException.
        """
        result = []
        for c in self.code:
            if c.isdigit():
                result.append(int(c))
            elif c != '-':
                raise InvalidIsbnException('The ISBN %s contains invalid characters.' % self.code)
        return result

    def checkValidity(self):
        """Raise InvalidIsbnException on wrong length or bad checksum."""
        # Hoisted: digits() reparses self.code on every call.
        digits = self.digits()
        if len(digits) != 13:
            raise InvalidIsbnException('The ISBN %s is not 13 digits long.' % self.code)
        if self.calculateChecksum() != digits[-1]:
            raise InvalidIsbnException('The ISBN checksum of %s is incorrect.' % self.code)

    def calculateChecksum(self):
        """Return the ISBN-13 check digit for the first 12 digits of the code."""
        # See http://en.wikipedia.org/wiki/ISBN#Check_digit_in_ISBN_13
        # Even-indexed digits count once, odd-indexed digits three times.
        # (Hoisted the repeated self.digits() calls; avoid shadowing sum().)
        digits = self.digits()
        total = 0
        for i in range(0, 13 - 1, 2):
            total += digits[i]
        for i in range(1, 13 - 1, 2):
            total += 3 * digits[i]
        return (10 - (total % 10)) % 10
class ISBN10(ISBN):
    """An ISBN-10 number: 9 digits plus a mod-11 check digit (possibly 'X')."""

    def __init__(self, code):
        self.code = code
        self.checkValidity()

    def possiblePrefixes(self):
        # ISBN-10 has no GS1 prefix.
        return []

    def digits(self):
        """
        Returns a list of the digits and Xs in the ISBN code.

        Entries are single-character strings, not ints, because the check
        digit may be 'X'/'x'. Hyphens are skipped; any other character
        raises InvalidIsbnException.
        """
        result = []
        for c in self.code:
            if c.isdigit() or c in 'Xx':
                result.append(c)
            elif c != '-':
                raise InvalidIsbnException('The ISBN %s contains invalid characters.' % self.code)
        return result

    def checkChecksum(self):
        """
        Raises an InvalidIsbnException if the checksum shows that the
        ISBN is incorrect.
        """
        # See http://en.wikipedia.org/wiki/ISBN#Check_digit_in_ISBN_10
        # (Hoisted the repeated self.digits() calls; avoid shadowing sum().)
        digits = self.digits()
        total = 0
        for i in range(0, 9):
            total += (i + 1) * int(digits[i])
        checksum = total % 11
        lastDigit = digits[-1]
        if not ((checksum == 10 and lastDigit in 'Xx') or
                (lastDigit.isdigit() and checksum == int(lastDigit))):
            raise InvalidIsbnException('The ISBN checksum of %s is incorrect.' % self.code)

    def checkValidity(self):
        """Raise InvalidIsbnException on wrong length, misplaced X, or bad checksum."""
        digits = self.digits()
        if len(digits) != 10:
            raise InvalidIsbnException('The ISBN %s is not 10 digits long.' % self.code)
        if 'X' in digits[:-1] or 'x' in digits[:-1]:
            raise InvalidIsbnException('ISBN %s: X is only allowed at the end of the ISBN.' % self.code)
        self.checkChecksum()

    def toISBN13(self):
        """
        Creates a 13-digit ISBN from this 10-digit ISBN by prefixing the GS1
        prefix '978' and recalculating the checksum.
        The hyphenation structure is taken from the format of the original
        ISBN number.
        """
        # Drop the old ISBN-10 check digit; ISBN13 recomputes its own.
        code = '978-' + self.code[:-1]
        return ISBN13(code, checksumMissing = True)

    def format(self):
        """Hyphenate via the superclass, then capitalize a trailing 'x'."""
        ISBN.format(self)
        if self.code[-1] == 'x':
            self.code = self.code[:-1] + 'X'
def getIsbn(code):
    """Return an ISBN13 or ISBN10 object for code.

    Tries ISBN-13 first, then ISBN-10; raises InvalidIsbnException carrying
    both error messages when neither interpretation is valid.
    """
    # Modernized 'except X, e' to 'except X as e' (valid on Python 2.6+ and
    # required on Python 3).
    try:
        i = ISBN13(code)
    except InvalidIsbnException as e13:
        try:
            i = ISBN10(code)
        except InvalidIsbnException as e10:
            raise InvalidIsbnException(u'ISBN-13: %s / ISBN-10: %s'
                                       % (e13, e10))
    return i
def _hyphenateIsbnNumber(match):
    """
    re.sub callback: hyphenate one matched ISBN, or return it unchanged
    when it is not a valid ISBN.
    """
    code = match.group('code')
    try:
        isbn = getIsbn(code)
    except InvalidIsbnException:
        # Invalid ISBN; leave the wiki text exactly as it was.
        return code
    isbn.format()
    return isbn.code
def hyphenateIsbnNumbers(text):
    """Hyphenate every valid ISBN that follows an 'ISBN ' marker in text."""
    isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[\dXx])')
    return isbnR.sub(_hyphenateIsbnNumber, text)
def _isbn10toIsbn13(match):
    """
    re.sub callback: convert one matched ISBN-10 to its ISBN-13 form, or
    return the match unchanged when it is invalid or already an ISBN-13.
    """
    code = match.group('code')
    try:
        i = getIsbn(code)
    except InvalidIsbnException:
        # don't change
        return code
    # Bug fix: getIsbn tries ISBN-13 first, so a matched 13-digit code yields
    # an ISBN13 object with no toISBN13() method; calling it unconditionally
    # raised AttributeError. Leave ISBN-13 codes untouched instead.
    if not isinstance(i, ISBN10):
        return code
    return i.toISBN13().code
def convertIsbn10toIsbn13(text):
    """Replace every valid ISBN-10 following an 'ISBN ' marker with ISBN-13."""
    isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[Xx]?)')
    return isbnR.sub(_isbn10toIsbn13, text)
class IsbnBot:
# Bot that reports invalid ISBNs on a stream of pages and optionally
# rewrites them (ISBN-10 -> ISBN-13 and/or hyphenation).
def __init__(self, generator, to13 = False, format = False, always = False):
# generator: iterable of pages to process.
# to13: convert ISBN-10 codes to ISBN-13.
# format: correct the hyphenation of ISBNs.
# always: save without prompting the user for each change.
self.generator = generator
self.to13 = to13
self.format = format
self.always = always
self.isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[Xx]?)')
self.comment = i18n.twtranslate(pywikibot.getSite(), 'isbn-formatting')
def treat(self, page):
# Validate every ISBN on the page, apply the requested rewrites,
# and save the result.
try:
oldText = page.get()
# Report invalid ISBNs (validation only; text is not changed here).
for match in self.isbnR.finditer(oldText):
code = match.group('code')
try:
getIsbn(code)
except InvalidIsbnException, e:
pywikibot.output(e)
newText = oldText
if self.to13:
newText = self.isbnR.sub(_isbn10toIsbn13, newText)
if self.format:
newText = self.isbnR.sub(_hyphenateIsbnNumber, newText)
self.save(page, newText)
except pywikibot.NoPage:
pywikibot.output(u"Page %s does not exist?!" % page.title(asLink=True))
except pywikibot.IsRedirectPage:
pywikibot.output(u"Page %s is a redirect; skipping." % page.title(asLink=True))
except pywikibot.LockedPage:
pywikibot.output(u"Page %s is locked?!" % page.title(asLink=True))
def save(self, page, text):
# Save text to page when it changed, showing a diff and (unless
# self.always) asking for confirmation first.
if text != page.get():
# Show the title of the page we're working on.
# Highlight the title in purple.
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title())
pywikibot.showDiff(page.get(), text)
if not self.always:
choice = pywikibot.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No', 'Always yes'], ['y', 'N', 'a'], 'N')
if choice == 'n':
return
elif choice == 'a':
self.always = True
if self.always:
try:
page.put(text, comment=self.comment)
except pywikibot.EditConflict:
pywikibot.output(u'Skipping %s because of edit conflict' % (page.title(),))
except pywikibot.SpamfilterError, e:
pywikibot.output(u'Cannot change %s because of blacklist entry %s' % (page.title(), e.url))
except pywikibot.LockedPage:
pywikibot.output(u'Skipping %s (locked page)' % (page.title(),))
else:
# Save the page in the background. No need to catch exceptions.
# NOTE(review): 'async' is a reserved word on Python 3.7+; this
# keyword only works on Python 2 / old pywikibot releases.
page.put(text, comment=self.comment, async=True)
def run(self):
# Process every page yielded by the generator.
for page in self.generator:
self.treat(page)
def main():
# Parse the command line, build the page generator, and run IsbnBot.
#page generator
gen = None
# This temporary array is used to read the page title if one single
# page to work on is specified by the arguments.
pageTitle = []
# Which namespaces should be processed?
# default to [] which means all namespaces will be processed
namespaces = []
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine on which pages
# to work on.
genFactory = pagegenerators.GeneratorFactory()
# Never ask before changing a page
always = False
to13 = False
format = False
for arg in pywikibot.handleArgs():
if arg.startswith('-namespace:'):
try:
namespaces.append(int(arg[11:]))
except ValueError:
# Not a number; treat the namespace argument as a name.
namespaces.append(arg[11:])
elif arg == '-always':
always = True
elif arg == '-to13':
to13 = True
elif arg == '-format':
format = True
else:
# Unrecognized flags go to the generator factory; anything it does
# not consume is assumed to be a page title.
if not genFactory.handleArg(arg):
pageTitle.append(arg)
site = pywikibot.getSite()
site.login()
if pageTitle:
gen = iter([pywikibot.Page(pywikibot.Link(t, site)) for t in pageTitle])
if not gen:
gen = genFactory.getCombinedGenerator()
if not gen:
# No pages specified at all; show the help text for this script.
pywikibot.showHelp('isbn')
else:
if namespaces != []:
gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
# Preload page texts in batches to reduce server round-trips.
preloadingGen = pagegenerators.PreloadingGenerator(gen)
bot = IsbnBot(preloadingGen, to13 = to13, format = format, always = always)
bot.run()
if __name__ == "__main__":
try:
main()
finally:
# Always release throttle/site state on exit, even after an exception.
pywikibot.stopme()
| |
from typing import Type, Any, Callable, Union, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from .._internally_replaced_utils import load_state_dict_from_url
from ..utils import _log_api_usage_once
# Public API of this module (what `from ... import *` exports).
__all__ = [
    "ResNet",
    "resnet18",
    "resnet34",
    "resnet50",
    "resnet101",
    "resnet152",
    "resnext50_32x4d",
    "resnext101_32x8d",
    "wide_resnet50_2",
    "wide_resnet101_2",
]

# Download locations of the ImageNet-pretrained weights, keyed by the
# architecture name passed to _resnet() when pretrained=True.
model_urls = {
    "resnet18": "https://download.pytorch.org/models/resnet18-f37072fd.pth",
    "resnet34": "https://download.pytorch.org/models/resnet34-b627a593.pth",
    "resnet50": "https://download.pytorch.org/models/resnet50-0676ba61.pth",
    "resnet101": "https://download.pytorch.org/models/resnet101-63fe2227.pth",
    "resnet152": "https://download.pytorch.org/models/resnet152-394f9c45.pth",
    "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
    "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
    "wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
    "wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Bias-free 3x3 convolution whose padding equals its dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Bias-free 1x1 (pointwise) convolution."""
    pointwise = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
    return pointwise
class BasicBlock(nn.Module):
    """Two-convolution residual block used by ResNet-18/34."""

    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # When stride != 1, conv1 (together with the optional downsample
        # shortcut) performs the spatial downsampling.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        # Shortcut path: identity, or the projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        h += shortcut
        return self.relu(h)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual block used by ResNet-50/101/152.

    Torchvision places the downsampling stride on the 3x3 convolution
    (self.conv2), while the original implementation from
    "Deep residual learning for image recognition"
    (https://arxiv.org/abs/1512.03385) puts it on the first 1x1
    convolution (self.conv1). This variant is known as ResNet V1.5 and
    improves accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 stage, scaled by base_width and groups.
        width = int(planes * (base_width / 64.0)) * groups
        # conv2 (and the optional downsample shortcut) carries the stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h += shortcut
        return self.relu(h)
class ResNet(nn.Module):
    """Generic ResNet backbone and classifier.

    A 7x7 stem followed by four residual stages (``layer1``..``layer4``),
    global average pooling and a fully-connected classification head. The
    concrete architecture is chosen via ``block`` (BasicBlock or
    Bottleneck) and ``layers`` (number of blocks per stage).
    """

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # Channel count entering the next stage; mutated by _make_layer.
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # NOTE: the four stages must be built in this order --
        # _make_layer updates self.inplanes/self.dilation as a side
        # effect, and module registration order fixes state_dict keys.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Kaiming init for convolutions, unit-weight/zero-bias for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        """Build one residual stage of ``blocks`` blocks.

        Only the first block may change resolution (stride) or channel
        count; it receives a 1x1-conv ``downsample`` shortcut when needed.
        Mutates ``self.inplanes`` and ``self.dilation`` for the next stage.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade this stage's stride for dilation (keeps resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    """Instantiate a ResNet and optionally load its pretrained weights."""
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-18, from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    blocks_per_stage = [2, 2, 2, 2]
    return _resnet("resnet18", BasicBlock, blocks_per_stage, pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-34, from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    blocks_per_stage = [3, 4, 6, 3]
    return _resnet("resnet34", BasicBlock, blocks_per_stage, pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-50, from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    blocks_per_stage = [3, 4, 6, 3]
    return _resnet("resnet50", Bottleneck, blocks_per_stage, pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-101, from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    blocks_per_stage = [3, 4, 23, 3]
    return _resnet("resnet101", Bottleneck, blocks_per_stage, pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-152, from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    blocks_per_stage = [3, 8, 36, 3]
    return _resnet("resnet152", Bottleneck, blocks_per_stage, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNeXt-50 32x4d, from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Force the ResNeXt cardinality/width configuration.
    kwargs.update(groups=32, width_per_group=4)
    return _resnet("resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNeXt-101 32x8d, from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Force the ResNeXt cardinality/width configuration.
    kwargs.update(groups=32, width_per_group=8)
    return _resnet("resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a Wide ResNet-50-2, from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Same as ResNet-50 except that every bottleneck uses twice as many
    channels in its 3x3 stage; the outer 1x1 channel counts are unchanged
    (e.g. the last block has 2048-1024-2048 channels instead of
    2048-512-2048).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet("wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a Wide ResNet-101-2, from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Same as ResNet-101 except that every bottleneck uses twice as many
    channels in its 3x3 stage; the outer 1x1 channel counts are unchanged
    (e.g. the last block has 2048-1024-2048 channels instead of
    2048-512-2048).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet("wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
| |
"""
Base backends structures.
This module defines base classes needed to define custom OpenID or OAuth
auth services from third parties. These custom backends must subclass an
Auth and a Backend class; check the current implementations for examples.
Also the modules *must* define a BACKENDS dictionary with the backend name
(which is used for URLs matching) and Auth class, otherwise it won't be
enabled.
"""
from urllib2 import Request, HTTPError
from urllib import urlencode
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL, FAILURE
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg, ax
from oauth2 import Consumer as OAuthConsumer, Token, Request as OAuthRequest
from django.contrib.auth import authenticate
from django.contrib.auth.backends import ModelBackend
from django.utils import simplejson
from django.utils.importlib import import_module
from social_auth.models import UserSocialAuth
from social_auth.utils import setting, log, model_to_ctype, ctype_to_model, \
clean_partial_pipeline, url_add_parameters, \
get_random_string, constant_time_compare, \
dsa_urlopen
from social_auth.store import DjangoOpenIDStore
from social_auth.backends.exceptions import StopPipeline, AuthException, \
AuthFailed, AuthCanceled, \
AuthUnknownError, AuthTokenError, \
AuthMissingParameter, \
AuthStateMissing, \
AuthStateForbidden
from social_auth.backends.utils import build_consumer_oauth_request
# OpenID configuration
# Legacy schema.openid.net attribute URIs (pre-axschema.org) and the
# "old_"-prefixed aliases they are mapped to in the AX fetch request.
OLD_AX_ATTRS = [
    ('http://schema.openid.net/contact/email', 'old_email'),
    ('http://schema.openid.net/namePerson', 'old_fullname'),
    ('http://schema.openid.net/namePerson/friendly', 'old_nickname')
]
AX_SCHEMA_ATTRS = [
    # Request both the full name and first/last components since some
    # providers offer one but not the other.
    ('http://axschema.org/contact/email', 'email'),
    ('http://axschema.org/namePerson', 'fullname'),
    ('http://axschema.org/namePerson/first', 'first_name'),
    ('http://axschema.org/namePerson/last', 'last_name'),
    ('http://axschema.org/namePerson/friendly', 'nickname'),
]
# Simple Registration attribute names and their aliases.
SREG_ATTR = [
    ('email', 'email'),
    ('fullname', 'fullname'),
    ('nickname', 'nickname')
]
# Request field that carries the user-supplied OpenID provider URL.
OPENID_ID_FIELD = 'openid_identifier'
# Session key under which the OpenID consumer stores its state.
SESSION_NAME = 'openid'

# key for username in user details dict used around, see get_user_details
# method
USERNAME = 'username'

# Ordered dotted paths executed by SocialAuthBackend.pipeline();
# overridable through the SOCIAL_AUTH_PIPELINE setting.
PIPELINE = setting('SOCIAL_AUTH_PIPELINE', (
    'social_auth.backends.pipeline.social.social_auth_user',
    # Removed by default since it can be a dangerous behavior that
    # could lead to account takeover.
    #'social_auth.backends.pipeline.associate.associate_by_email',
    'social_auth.backends.pipeline.user.get_username',
    'social_auth.backends.pipeline.user.create_user',
    'social_auth.backends.pipeline.social.associate_user',
    'social_auth.backends.pipeline.social.load_extra_data',
    'social_auth.backends.pipeline.user.update_user_details',
))
class SocialAuthBackend(ModelBackend):
    """A django.contrib.auth backend that authenticates the user based on
    an authentication provider response"""
    name = ''  # provider name, it's stored in database

    def authenticate(self, *args, **kwargs):
        """Authenticate user using social credentials

        Authentication is made if this is the correct backend, backend
        verification is made by kwargs inspection for current backend
        name presence.

        Returns a User on success, None when this backend does not apply,
        or whatever non-dict value a pipeline entry returned.
        """
        # Validate backend and arguments. Require that the Social Auth
        # response be passed in as a keyword argument, to make sure we
        # don't match the username/password calling conventions of
        # authenticate.
        if not (self.name and kwargs.get(self.name) and 'response' in kwargs):
            return None

        response = kwargs.get('response')
        pipeline = PIPELINE
        kwargs = kwargs.copy()
        kwargs['backend'] = self

        if 'pipeline_index' in kwargs:
            # Resuming a halted (partial) pipeline: skip entries that
            # already ran.
            pipeline = pipeline[kwargs['pipeline_index']:]
        else:
            kwargs['details'] = self.get_user_details(response)
            kwargs['uid'] = self.get_user_id(kwargs['details'], response)
            kwargs['is_new'] = False

        out = self.pipeline(pipeline, *args, **kwargs)
        if not isinstance(out, dict):
            # A pipeline entry short-circuited; propagate its result.
            return out

        social_user = out.get('social_user')
        if social_user:
            # define user.social_user attribute to track current social
            # account
            user = social_user.user
            user.social_user = social_user
            user.is_new = out.get('is_new')
            return user

    def pipeline(self, pipeline, *args, **kwargs):
        """Pipeline

        Runs the given entries in order, threading the keyword dict through
        them: dict results are merged into it, any non-dict result is
        returned to the caller immediately, and StopPipeline aborts the run
        (cleaning stored partial-pipeline data when a request is present).
        """
        out = kwargs.copy()
        if 'pipeline_index' in kwargs:
            base_index = int(kwargs['pipeline_index'])
        else:
            base_index = 0

        for idx, name in enumerate(pipeline):
            # Record progress so the pipeline can be resumed later.
            out['pipeline_index'] = base_index + idx
            mod_name, func_name = name.rsplit('.', 1)
            try:
                mod = import_module(mod_name)
            except ImportError:
                log('exception', 'Error importing pipeline %s', name)
            else:
                func = getattr(mod, func_name, None)
                if callable(func):
                    try:
                        result = func(*args, **out) or {}
                    except StopPipeline:
                        # Clean partial pipeline on stop
                        if 'request' in kwargs:
                            clean_partial_pipeline(kwargs['request'])
                        break
                    if isinstance(result, dict):
                        out.update(result)
                    else:
                        return result
        return out

    def extra_data(self, user, uid, response, details):
        """Return default blank user extra data"""
        return {}

    def get_user_id(self, details, response):
        """Must return a unique ID from values returned on details"""
        raise NotImplementedError('Implement in subclass')

    def get_user_details(self, response):
        """Must return user details in a known internal struct:
            {USERNAME: <username if any>,
             'email': <user email if any>,
             'fullname': <user full name if any>,
             'first_name': <user first name if any>,
             'last_name': <user last name if any>}
        """
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def tokens(cls, instance):
        """Return the tokens needed to authenticate the access to any API the
        service might provide. The return value will be a dictionary with the
        token type name as key and the token value.

        instance must be a UserSocialAuth instance.
        """
        if instance.extra_data and 'access_token' in instance.extra_data:
            return {
                'access_token': instance.extra_data['access_token']
            }
        else:
            return {}

    def get_user(self, user_id):
        """
        Return user with given ID from the User model used by this backend
        """
        return UserSocialAuth.get_user(user_id)
class OAuthBackend(SocialAuthBackend):
    """OAuth authentication backend base class.

    EXTRA_DATA defines the set of names that will be stored in the
    extra_data field. It must be a list of tuples with name and alias
    (optionally a third "discard if empty" flag, or a 1-tuple).

    Also settings will be inspected to get more value names that should be
    stored on extra_data field. Setting name is created from the current
    backend name (all uppercase) plus _EXTRA_DATA.

    access_token is always stored.
    """
    EXTRA_DATA = None

    def get_user_id(self, details, response):
        """OAuth providers return an unique user id in response"""
        return response['id']

    def extra_data(self, user, uid, response, details):
        """Return access_token and extra defined names to store in
        extra_data field"""
        data = {'access_token': response.get('access_token', '')}
        # Use a dedicated variable for the settings lookup so the loop
        # variable below doesn't shadow it.
        setting_name = self.name.replace('-', '_').upper()
        names = (self.EXTRA_DATA or []) + setting(setting_name + '_EXTRA_DATA', [])
        for entry in names:
            if len(entry) == 2:
                (name, alias), discard = entry, False
            elif len(entry) == 3:
                name, alias, discard = entry
            elif len(entry) == 1:
                # Bug fix: this branch used to bind the 1-tuple itself to
                # name/alias and left `discard` unbound (NameError on the
                # first entry, or a stale value from a previous one).
                # Unpack the single element and never discard.
                name, alias, discard = entry[0], entry[0], False
            else:  # ???
                continue
            value = response.get(name)
            # Skip empty values only when the entry asked to discard them.
            if discard and not value:
                continue
            data[alias] = value
        return data
class OpenIDBackend(SocialAuthBackend):
    """Generic OpenID authentication backend"""
    name = 'openid'

    def get_user_id(self, details, response):
        """Return user unique id provided by service"""
        # The claimed identity URL uniquely identifies an OpenID user.
        return response.identity_url

    def values_from_response(self, response, sreg_names=None, ax_names=None):
        """Return values from SimpleRegistration response or
        AttributeExchange response if present.

        @sreg_names and @ax_names must be a list of name and aliases
        for such name. The alias will be used as mapping key.
        """
        values = {}

        # Use Simple Registration attributes if provided
        if sreg_names:
            resp = sreg.SRegResponse.fromSuccessResponse(response)
            if resp:
                values.update((alias, resp.get(name) or '')
                                    for name, alias in sreg_names)

        # Use Attribute Exchange attributes if provided
        if ax_names:
            resp = ax.FetchResponse.fromSuccessResponse(response)
            if resp:
                for src, alias in ax_names:
                    name = alias.replace('old_', '')
                    # Prefer the AX value; fall back to what SReg set.
                    values[name] = resp.getSingle(src, '') or values.get(name)
        return values

    def get_user_details(self, response):
        """Return user details from an OpenID request"""
        values = {USERNAME: '', 'email': '', 'fullname': '',
                  'first_name': '', 'last_name': ''}
        # update values using SimpleRegistration or AttributeExchange
        # values
        values.update(self.values_from_response(response,
                                                SREG_ATTR,
                                                OLD_AX_ATTRS + \
                                                AX_SCHEMA_ATTRS))

        fullname = values.get('fullname') or ''
        first_name = values.get('first_name') or ''
        last_name = values.get('last_name') or ''

        # Derive whichever of fullname / first+last is missing.
        if not fullname and first_name and last_name:
            fullname = first_name + ' ' + last_name
        elif fullname:
            try:  # Try to split name for django user storage
                first_name, last_name = fullname.rsplit(' ', 1)
            except ValueError:
                last_name = fullname

        values.update({'fullname': fullname, 'first_name': first_name,
                       'last_name': last_name,
                       USERNAME: values.get(USERNAME) or \
                                   (first_name.title() + last_name.title())})
        return values

    def extra_data(self, user, uid, response, details):
        """Return defined extra data names to store in extra_data field.

        Settings will be inspected to get more values names that should be
        stored on extra_data field. Setting name is created from current
        backend name (all uppercase) plus _SREG_EXTRA_DATA and
        _AX_EXTRA_DATA because values can be returned by SimpleRegistration
        or AttributeExchange schemas.

        Both lists must be a value name and an alias mapping similar to
        SREG_ATTR, OLD_AX_ATTRS or AX_SCHEMA_ATTRS
        """
        name = self.name.replace('-', '_').upper()
        sreg_names = setting(name + '_SREG_EXTRA_DATA')
        ax_names = setting(name + '_AX_EXTRA_DATA')
        data = self.values_from_response(response, sreg_names, ax_names)
        return data
class BaseAuth(object):
    """Base authentication class, new authenticators should subclass
    and implement needed methods.

        AUTH_BACKEND   Authorization backend related with this service
    """
    AUTH_BACKEND = None

    def __init__(self, request, redirect):
        self.request = request
        # Use request because some auth providers use POST urls with needed
        # GET parameters on it
        self.data = request.REQUEST
        self.redirect = redirect

    def auth_url(self):
        """Must return redirect URL to auth provider"""
        raise NotImplementedError('Implement in subclass')

    def auth_html(self):
        """Must return login HTML content returned by provider"""
        raise NotImplementedError('Implement in subclass')

    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
        raise NotImplementedError('Implement in subclass')

    def to_session_dict(self, next_idx, *args, **kwargs):
        """Returns dict to store on session for partial pipeline."""
        # Model instances are converted with model_to_ctype so they can be
        # serialized into the session and rebuilt later.
        return {
            'next': next_idx,
            'backend': self.AUTH_BACKEND.name,
            'args': tuple(map(model_to_ctype, args)),
            'kwargs': dict((key, model_to_ctype(val))
                                for key, val in kwargs.iteritems())
        }

    def from_session_dict(self, session_data, *args, **kwargs):
        """Takes session saved data to continue pipeline and merges with any
        new extra argument needed. Returns tuple with next pipeline index
        entry, arguments and keyword arguments to continue the process."""
        args = args[:] + tuple(map(ctype_to_model, session_data['args']))
        kwargs = kwargs.copy()
        kwargs.update((key, ctype_to_model(val))
                        for key, val in session_data['kwargs'].iteritems())
        return (session_data['next'], args, kwargs)

    def continue_pipeline(self, *args, **kwargs):
        """Continue previous halted pipeline"""
        # Flag this backend's name so authenticate() picks the right one.
        kwargs.update({
            'auth': self,
            self.AUTH_BACKEND.name: True
        })
        return authenticate(*args, **kwargs)

    def request_token_extra_arguments(self):
        """Return extra arguments needed on request-token process,
        setting is per backend and defined by:
            <backend name in uppercase>_REQUEST_TOKEN_EXTRA_ARGUMENTS.
        """
        backend_name = self.AUTH_BACKEND.name.upper().replace('-', '_')
        return setting(backend_name + '_REQUEST_TOKEN_EXTRA_ARGUMENTS', {})

    def auth_extra_arguments(self):
        """Return extra arguments needed on auth process, setting is per
        backend and defined by:
            <backend name in uppercase>_AUTH_EXTRA_ARGUMENTS.
        """
        backend_name = self.AUTH_BACKEND.name.upper().replace('-', '_')
        return setting(backend_name + '_AUTH_EXTRA_ARGUMENTS', {})

    @property
    def uses_redirect(self):
        """Return True if this provider uses redirect url method,
        otherwise return false."""
        return True

    @classmethod
    def enabled(cls):
        """Return backend enabled status, all enabled by default"""
        return True

    def disconnect(self, user, association_id=None):
        """Deletes current backend from user if associated.
        Override if extra operations are needed.
        """
        if association_id:
            UserSocialAuth.get_social_auth_for_user(user)\
                          .get(id=association_id).delete()
        else:
            UserSocialAuth.get_social_auth_for_user(user)\
                          .filter(provider=self.AUTH_BACKEND.name).delete()

    def build_absolute_uri(self, path=None):
        """Build absolute URI for given path. Replace http:// schema with
        https:// if SOCIAL_AUTH_REDIRECT_IS_HTTPS is defined.
        """
        uri = self.request.build_absolute_uri(path)
        if setting('SOCIAL_AUTH_REDIRECT_IS_HTTPS'):
            uri = uri.replace('http://', 'https://')
        return uri
class OpenIdAuth(BaseAuth):
    """OpenId process handling"""
    AUTH_BACKEND = OpenIDBackend

    def auth_url(self):
        """Return auth URL returned by service"""
        openid_request = self.setup_request(self.auth_extra_arguments())
        # Construct completion URL, including page we should redirect to
        return_to = self.build_absolute_uri(self.redirect)
        return openid_request.redirectURL(self.trust_root(), return_to)

    def auth_html(self):
        """Return auth HTML returned by service"""
        openid_request = self.setup_request(self.auth_extra_arguments())
        return_to = self.build_absolute_uri(self.redirect)
        form_tag = {'id': 'openid_message'}
        return openid_request.htmlMarkup(self.trust_root(), return_to,
                                         form_tag_attrs=form_tag)

    def trust_root(self):
        """Return trust-root option"""
        return setting('OPENID_TRUST_ROOT') or self.build_absolute_uri('/')

    def continue_pipeline(self, *args, **kwargs):
        """Continue previous halted pipeline"""
        # Unlike the base implementation, re-read the OpenID response from
        # the request data before resuming.
        response = self.consumer().complete(dict(self.data.items()),
                                            self.build_absolute_uri())
        kwargs.update({
            'auth': self,
            'response': response,
            self.AUTH_BACKEND.name: True
        })
        return authenticate(*args, **kwargs)

    def auth_complete(self, *args, **kwargs):
        """Complete auth process"""
        response = self.consumer().complete(dict(self.data.items()),
                                            self.build_absolute_uri())
        # Map each consumer status to the matching auth exception.
        if not response:
            raise AuthException(self, 'OpenID relying party endpoint')
        elif response.status == SUCCESS:
            kwargs.update({
                'auth': self,
                'response': response,
                self.AUTH_BACKEND.name: True
            })
            return authenticate(*args, **kwargs)
        elif response.status == FAILURE:
            raise AuthFailed(self, response.message)
        elif response.status == CANCEL:
            raise AuthCanceled(self)
        else:
            raise AuthUnknownError(self, response.status)

    def setup_request(self, extra_params=None):
        """Setup request"""
        openid_request = self.openid_request(extra_params)
        # Request some user details. Use attribute exchange if provider
        # advertises support.
        if openid_request.endpoint.supportsType(ax.AXMessage.ns_uri):
            fetch_request = ax.FetchRequest()
            # Mark all attributes as required, Google ignores optional ones
            for attr, alias in (AX_SCHEMA_ATTRS + OLD_AX_ATTRS):
                fetch_request.add(ax.AttrInfo(attr, alias=alias,
                                              required=True))
        else:
            # Fall back to Simple Registration for providers without AX.
            fetch_request = sreg.SRegRequest(optional=dict(SREG_ATTR).keys())
        openid_request.addExtension(fetch_request)
        return openid_request

    def consumer(self):
        """Create an OpenID Consumer object for the given Django request."""
        return Consumer(self.request.session.setdefault(SESSION_NAME, {}),
                        DjangoOpenIDStore())

    @property
    def uses_redirect(self):
        """Return true if openid request will be handled with redirect or
        HTML content will be returned.
        """
        return self.openid_request().shouldSendRedirect()

    def openid_request(self, extra_params=None):
        """Return openid request"""
        try:
            return self.consumer().begin(url_add_parameters(self.openid_url(),
                                                            extra_params))
        except DiscoveryFailure, err:
            raise AuthException(self, 'OpenID discovery error: %s' % err)

    def openid_url(self):
        """Return service provider URL.
        This base class is generic accepting a POST parameter that specifies
        provider URL."""
        if OPENID_ID_FIELD not in self.data:
            raise AuthMissingParameter(self, OPENID_ID_FIELD)
        return self.data[OPENID_ID_FIELD]
class BaseOAuth(BaseAuth):
    """Common base for OAuth-style backends.

    Subclasses declare the settings names that hold the consumer
    credentials via SETTINGS_KEY_NAME / SETTINGS_SECRET_NAME.
    """
    SETTINGS_KEY_NAME = ''
    SETTINGS_SECRET_NAME = ''

    def __init__(self, request, redirect):
        """Store the absolute redirect URI in addition to base setup."""
        super(BaseOAuth, self).__init__(request, redirect)
        self.redirect_uri = self.build_absolute_uri(self.redirect)

    @classmethod
    def get_key_and_secret(cls):
        """Return the (key, secret) consumer credentials pair for the
        current service provider -- order *must* be respected."""
        return (setting(cls.SETTINGS_KEY_NAME),
                setting(cls.SETTINGS_SECRET_NAME))

    @classmethod
    def enabled(cls):
        """The backend is enabled only when both credential settings
        are present."""
        return setting(cls.SETTINGS_KEY_NAME) and \
               setting(cls.SETTINGS_SECRET_NAME)
class ConsumerBasedOAuth(BaseOAuth):
    """Consumer based mechanism OAuth authentication, fill the needed
    parameters to communicate properly with authentication service.

        AUTHORIZATION_URL       Authorization service url
        REQUEST_TOKEN_URL       Request token URL
        ACCESS_TOKEN_URL        Access token URL
        SERVER_URL              Authorization server URL
    """
    AUTHORIZATION_URL = ''
    REQUEST_TOKEN_URL = ''
    ACCESS_TOKEN_URL = ''
    SERVER_URL = ''

    def auth_url(self):
        """Return redirect url"""
        token = self.unauthorized_token()
        name = self.AUTH_BACKEND.name + 'unauthorized_token_name'
        # Keep the unauthorized token in the session so auth_complete()
        # can verify the provider callback matches the token we requested.
        self.request.session[name] = token.to_string()
        return self.oauth_authorization_request(token).to_url()

    def auth_complete(self, *args, **kwargs):
        """Return user, might be logged in"""
        name = self.AUTH_BACKEND.name + 'unauthorized_token_name'
        unauthed_token = self.request.session.get(name)
        if not unauthed_token:
            raise AuthTokenError('Missing unauthorized token')

        token = Token.from_string(unauthed_token)
        # The callback must carry the same token stored at auth_url().
        if token.key != self.data.get('oauth_token', 'no-token'):
            raise AuthTokenError('Incorrect tokens')

        try:
            access_token = self.access_token(token)
        except HTTPError, e:
            # An HTTP 400 here is treated as the user cancelling the
            # authorization; anything else is re-raised.
            if e.code == 400:
                raise AuthCanceled(self)
            else:
                raise

        data = self.user_data(access_token)
        if data is not None:
            data['access_token'] = access_token.to_string()

        kwargs.update({
            'auth': self,
            'response': data,
            self.AUTH_BACKEND.name: True
        })
        return authenticate(*args, **kwargs)

    def unauthorized_token(self):
        """Return request for unauthorized token (first stage)"""
        request = self.oauth_request(token=None, url=self.REQUEST_TOKEN_URL,
                                     extra_params=self.request_token_extra_arguments())
        response = self.fetch_response(request)
        return Token.from_string(response)

    def oauth_authorization_request(self, token):
        """Generate OAuth request to authorize token."""
        return OAuthRequest.from_token_and_callback(token=token,
                                                    callback=self.redirect_uri,
                                                    http_url=self.AUTHORIZATION_URL,
                                                    parameters=self.auth_extra_arguments())

    def oauth_request(self, token, url, extra_params=None):
        """Generate OAuth request, setups callback url"""
        return build_consumer_oauth_request(self, token, url,
                                            self.redirect_uri,
                                            self.data.get('oauth_verifier'),
                                            extra_params)

    def fetch_response(self, request):
        """Executes request and fetchs service response"""
        response = dsa_urlopen(request.to_url())
        # NOTE(review): readlines() keeps each line's trailing newline, so
        # this join may double the separators on multi-line bodies --
        # verify providers' token responses tolerate it.
        return '\n'.join(response.readlines())

    def access_token(self, token):
        """Return request for access token value"""
        request = self.oauth_request(token, self.ACCESS_TOKEN_URL)
        return Token.from_string(self.fetch_response(request))

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        raise NotImplementedError('Implement in subclass')

    @property
    def consumer(self):
        """Setups consumer"""
        return OAuthConsumer(*self.get_key_and_secret())
class BaseOAuth2(BaseOAuth):
    """Base class for OAuth2 providers.

    OAuth2 draft details at:
        http://tools.ietf.org/html/draft-ietf-oauth-v2-10

    Attributes:
        AUTHORIZATION_URL       Authorization service url
        ACCESS_TOKEN_URL        Token URL
    """
    AUTHORIZATION_URL = None
    ACCESS_TOKEN_URL = None
    SCOPE_SEPARATOR = ' '
    RESPONSE_TYPE = 'code'
    SCOPE_VAR_NAME = None
    DEFAULT_SCOPE = None
    REDIRECT_STATE = True

    def state_token(self):
        """Generate csrf token to include as state parameter."""
        return get_random_string(32)

    def get_redirect_uri(self, state):
        """Build redirect_uri with redirect_state parameter."""
        uri = self.redirect_uri
        if self.REDIRECT_STATE:
            uri = url_add_parameters(uri, {'redirect_state': state})
        return uri

    def auth_url(self):
        """Return redirect url"""
        client_id, client_secret = self.get_key_and_secret()
        state = self.state_token()
        # Store state in session for further request validation. The state
        # value is passed as state parameter (as specified in OAuth2 spec), but
        # also added to redirect_uri, that way we can still verify the request
        # if the provider doesn't implement the state parameter.
        self.request.session[self.AUTH_BACKEND.name + '_state'] = state

        args = {
            'client_id': client_id,
            'state': state,
            'redirect_uri': self.get_redirect_uri(state)
        }
        scope = self.get_scope()
        if scope:
            args['scope'] = self.SCOPE_SEPARATOR.join(self.get_scope())
        if self.RESPONSE_TYPE:
            args['response_type'] = self.RESPONSE_TYPE
        args.update(self.auth_extra_arguments())
        return self.AUTHORIZATION_URL + '?' + urlencode(args)

    def validate_state(self):
        """Validate state value. Raises exception on error, returns state
        value if valid."""
        state = self.request.session.get(self.AUTH_BACKEND.name + '_state')
        request_state = self.data.get('state') or \
                        self.data.get('redirect_state')
        if not request_state:
            raise AuthMissingParameter(self, 'state')
        elif not state:
            raise AuthStateMissing(self, 'state')
        elif not constant_time_compare(request_state, state):
            # Timing-safe comparison avoids leaking the stored token.
            raise AuthStateForbidden(self)
        return state

    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
        if self.data.get('error'):
            error = self.data.get('error_description') or self.data['error']
            raise AuthFailed(self, error)

        state = self.validate_state()
        client_id, client_secret = self.get_key_and_secret()
        params = {
            'grant_type': 'authorization_code',  # request auth code
            'code': self.data.get('code', ''),   # server response code
            'client_id': client_id,
            'client_secret': client_secret,
            'redirect_uri': self.get_redirect_uri(state)
        }
        headers = {'Content-Type': 'application/x-www-form-urlencoded',
                   'Accept': 'application/json'}
        request = Request(self.ACCESS_TOKEN_URL, data=urlencode(params),
                          headers=headers)

        try:
            response = simplejson.loads(dsa_urlopen(request).read())
        except HTTPError as e:  # fixed Py2-only "except HTTPError, e" syntax
            if e.code == 400:
                # The provider rejected the code exchange; treat it as the
                # user having cancelled the process.
                raise AuthCanceled(self)
            else:
                raise
        except (ValueError, KeyError):
            raise AuthUnknownError(self)

        if response.get('error'):
            error = response.get('error_description') or response.get('error')
            raise AuthFailed(self, error)
        else:
            data = self.user_data(response['access_token'], response)
            response.update(data or {})
            kwargs.update({
                'auth': self,
                'response': response,
                self.AUTH_BACKEND.name: True
            })
            return authenticate(*args, **kwargs)

    def get_scope(self):
        """Return list with needed access scope"""
        scope = self.DEFAULT_SCOPE or []
        if self.SCOPE_VAR_NAME:
            scope = scope + setting(self.SCOPE_VAR_NAME, [])
        return scope
# Backend loading was previously performed via the
# SOCIAL_AUTH_IMPORT_BACKENDS setting - as it's no longer used,
# provide a deprecation warning.
if setting('SOCIAL_AUTH_IMPORT_BACKENDS'):
    from warnings import warn
    # Fixed: the warning previously named SOCIAL_AUTH_IMPORT_SOURCES, which
    # is not the setting actually being checked above.
    warn("SOCIAL_AUTH_IMPORT_BACKENDS is deprecated")

# Cache for discovered backends, keyed by backend name; populated lazily by
# get_backends() below.
BACKENDSCACHE = {}
def get_backends(force_load=False):
    """
    Entry point to the BACKENDS cache. If BACKENDSCACHE hasn't been
    populated, each of the modules referenced in AUTHENTICATION_BACKENDS is
    imported and checked for a BACKENDS definition and if enabled, added to
    the cache.

    Deferring the discovery to call time (instead of import time of this
    module) lets backends that subclass bases defined here be picked up.
    See: https://github.com/omab/django-social-auth/issues/204

    force_load=True re-runs discovery so get_backend() can retry a backend
    that may not yet have been found.
    """
    if BACKENDSCACHE and not force_load:
        return BACKENDSCACHE

    for backend_path in setting('AUTHENTICATION_BACKENDS'):
        module_name, class_name = backend_path.rsplit('.', 1)
        module = import_module(module_name)
        candidate = getattr(module, class_name)
        if not issubclass(candidate, SocialAuthBackend):
            continue
        # Only register the backend if its module declares it in BACKENDS
        # and it reports itself as enabled.
        declared = getattr(module, 'BACKENDS', {})
        name = candidate.name
        if name in declared and declared[name].enabled():
            BACKENDSCACHE[name] = declared[name]
    return BACKENDSCACHE
def get_backend(name, *args, **kwargs):
    """Returns a backend by name. Backends are stored in the BACKENDSCACHE
    cache dict. If not found, discovery is re-run via get_backends() and the
    lookup retried; None is returned when the backend still cannot be found.
    """
    # Fast path: the backend was already discovered.  Note that KeyError is
    # deliberately caught around the *call* as well, matching the original
    # EAFP structure.
    try:
        return BACKENDSCACHE[name](*args, **kwargs)
    except KeyError:
        pass
    # Force a reload of BACKENDS to ensure a missing backend hasn't been
    # missed, then retry once.
    get_backends(force_load=True)
    try:
        return BACKENDSCACHE[name](*args, **kwargs)
    except KeyError:
        return None
# Backends declared directly by this module; additional backends are
# discovered lazily through get_backends()/get_backend() above.
BACKENDS = {
    'openid': OpenIdAuth
}
| |
import os
import re
import time
import copy
from datetime import datetime, timedelta
import sys, requests, json
import bdd_test_util
# TCP port the peer REST service listens on inside each container; used by
# buildUrl() below.
OPENCHAIN_REST_PORT = 5000
class ContainerData:
    """Details of one docker container started via docker-compose."""

    def __init__(self, containerName, ipAddress, envFromInspect, composeService):
        self.containerName = containerName    # full docker container name
        self.ipAddress = ipAddress            # container network IP address
        self.envFromInspect = envFromInspect  # list of "KEY=value" strings
        self.composeService = composeService  # docker-compose service name

    def getEnv(self, key):
        """Return the remainder of the first env entry starting with `key`.

        `key` is matched as a plain prefix: passing "KEY=" yields the value,
        while passing "KEY" yields "=value" including the equals sign.
        Raises Exception when no entry matches.
        """
        envValue = None
        for val in self.envFromInspect:
            if val.startswith(key):
                envValue = val[len(key):]
                break
        # Use an identity check; the original "== None" works but is
        # unidiomatic and would invoke custom __eq__ on odd values.
        if envValue is None:
            raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
        return envValue
def parseComposeOutput(context):
    """Parses the compose output results and sets values into context,
    merging existing container data with the newly composed containers.

    docker-compose prints one line per created container on stderr; the
    second whitespace-separated token of each line is the container name.
    Each named container is then `docker inspect`ed for its IP address,
    environment, and compose service label.
    """
    # Removed an unused containerNamePrefix local that was never read here.
    containerNames = []
    for line in context.compose_error.splitlines():
        print(line.split())
        containerNames.append(line.split()[1])
    print(containerNames)

    # Now get the Network Address for each name, and set the ContainerData
    # onto the context.
    containerDataList = []
    for containerName in containerNames:
        output, error, returncode = \
            bdd_test_util.cli_call(context, ["docker", "inspect", "--format", "{{ .NetworkSettings.IPAddress }}", containerName], expect_success=True)
        ipAddress = output.splitlines()[0]

        # Environment: inspect prints "[KEY=val KEY2=val2]"; strip the
        # surrounding brackets and split on whitespace.
        output, error, returncode = \
            bdd_test_util.cli_call(context, ["docker", "inspect", "--format", "{{ .Config.Env }}", containerName], expect_success=True)
        env = output.splitlines()[0][1:-1].split()

        # Labels: inspect prints "map[key:value ...]"; strip "map[" and the
        # trailing "]", then keep the com.docker.compose.service entry
        # (27 == len("com.docker.compose.service:")).
        output, error, returncode = \
            bdd_test_util.cli_call(context, ["docker", "inspect", "--format", "{{ .Config.Labels }}", containerName], expect_success=True)
        labels = output.splitlines()[0][4:-1].split()
        dockerComposeService = [composeService[27:] for composeService in labels if composeService.startswith("com.docker.compose.service:")][0]
        print("dockerComposeService = {0}".format(dockerComposeService))
        print("container {0} has env = {1}".format(containerName, env))
        containerDataList.append(ContainerData(containerName, ipAddress, env, dockerComposeService))

    # Merge the newly discovered containers with any recorded previously.
    newContainerDataList = []
    if "compose_containers" in context:
        newContainerDataList = context.compose_containers
    newContainerDataList = newContainerDataList + containerDataList
    setattr(context, "compose_containers", newContainerDataList)
    print("")
def ipFromContainerNamePart(namePart, containerDataList):
    """Return the IP address of the container whose name starts with
    "<cwd-basename>_<namePart>" (the prefix docker-compose gives containers).

    When several containers match, the last match wins (original behavior
    preserved).  Raises Exception when no container matches.
    """
    ip = None
    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
    for containerData in containerDataList:
        if containerData.containerName.startswith(containerNamePrefix + namePart):
            ip = containerData.ipAddress
    # Identity check instead of the original unidiomatic "== None".
    if ip is None:
        raise Exception("Could not find container with namePart = {0}".format(namePart))
    return ip
def buildUrl(ipAddress, path):
    """Build the REST endpoint URL for a peer at the given address."""
    return "http://%s:%s%s" % (ipAddress, OPENCHAIN_REST_PORT, path)
@given(u'we compose "{composeYamlFile}"')
def step_impl(context, composeYamlFile):
    """Bring up the docker-compose services in composeYamlFile.

    Stores the yaml path and the compose output/error/returncode on the
    behave context, then parses the compose stderr to record the started
    containers via parseComposeOutput().
    """
    # Use the uninstalled version of `cf active-deploy` rather than the installed version on the OS $PATH
    #cmd = os.path.dirname(os.path.abspath(__file__)) + "/../../../cf_update/v1/cf_update.py"

    # Expand $vars, e.g. "--path $PATH" becomes "--path /bin"
    #args = re.sub('\$\w+', lambda v: os.getenv(v.group(0)[1:]), composeYamlFile)
    context.compose_yaml = composeYamlFile
    context.compose_output, context.compose_error, context.compose_returncode = \
        bdd_test_util.cli_call(context, ["docker-compose", "-f", composeYamlFile, "up","--force-recreate", "-d"], expect_success=True)
    assert context.compose_returncode == 0, "docker-compose failed to bring up {0}".format(composeYamlFile)
    parseComposeOutput(context)
@when(u'requesting "{path}" from "{containerName}"')
def step_impl(context, path, containerName):
    """GET `path` from the named peer; store the response on context.response."""
    ipAddress = ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(ipAddress, path)
    print("Requesting path = {0}".format(request_url))
    resp = requests.get(request_url, headers={'Accept': 'application/json'})
    assert resp.status_code == 200, "Failed to GET url %s: %s" % (request_url,resp.text)
    context.response = resp
    print("")
@then(u'I should get a JSON response with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Assert the stored JSON response maps `attribute` to `expectedValue`.

    Values are compared as strings since step parameters are always text.
    """
    body = context.response.json()
    assert attribute in body, "Attribute not found in response (%s)" %(attribute)
    foundValue = body[attribute]
    assert (str(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
# The same sleep step is registered for all three phases so feature files can
# say 'Given/When/Then I wait "N" seconds' interchangeably.
@given(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    """Sleep for the requested number of seconds."""
    time.sleep(float(seconds))

@when(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    """Sleep for the requested number of seconds."""
    time.sleep(float(seconds))

@then(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    """Sleep for the requested number of seconds."""
    time.sleep(float(seconds))
@when(u'I deploy chaincode "{chaincodePath}" with ctor "{ctor}" to "{containerName}"')
def step_impl(context, chaincodePath, ctor, containerName):
    """POST a golang chaincode deploy request to the named peer.

    Constructor arguments may be supplied through the step's table.  The
    chaincode name returned by the peer is written back into the spec and
    the spec is stored as context.chaincodeSpec for later invoke/query steps.
    """
    ipAddress = ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(ipAddress, "/devops/deploy")
    print("Requesting path = {0}".format(request_url))
    args = []
    if 'table' in context:
        # There is ctor arguments
        args = context.table[0].cells
    typeGolang = 1

    # Create a ChaincodeSpec structure
    chaincodeSpec = {
        "type": typeGolang,
        "chaincodeID": {
            "path" : chaincodePath,
            "name" : ""
        },
        "ctorMsg": {
            "function" : ctor,
            "args" : args
        },
        #"secureContext" : "binhn"
    }
    if 'userName' in context:
        # Security is enabled: attach the registered user's identity.
        chaincodeSpec["secureContext"] = context.userName

    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeSpec))
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    # The peer returns the generated chaincode name in 'message'.
    chaincodeName = resp.json()['message']
    chaincodeSpec['chaincodeID']['name'] = chaincodeName
    context.chaincodeSpec = chaincodeSpec
    print(json.dumps(chaincodeSpec, indent=4))
    print("")
@then(u'I should have received a chaincode name')
def step_impl(context):
    """Verify the deploy step recorded a non-empty chaincode name and use it
    as the current transaction ID."""
    if 'chaincodeSpec' in context:
        assert context.chaincodeSpec['chaincodeID']['name'] != ""
        # Set the current transactionID to the name passed back
        context.transactionID = context.chaincodeSpec['chaincodeID']['name']
    else:
        # Fixed: the original called fail(), which is not defined anywhere in
        # this module and would raise NameError instead of a test failure.
        raise AssertionError('chaincodeSpec not in context')
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}" "{times}" times')
def step_impl(context, chaincodeName, functionName, containerName, times):
    """Invoke the deployed chaincode repeatedly against one peer."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    repetitions = int(times)
    for _ in range(repetitions):
        invokeChaincode(context, functionName, containerName)
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Invoke the deployed chaincode once on the named peer."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    invokeChaincode(context, functionName, containerName)
@then(u'I should have received a transactionID')
def step_impl(context):
    """Verify a non-empty transaction ID was recorded by an earlier step."""
    assert 'transactionID' in context, 'transactionID not found in context'
    assert context.transactionID != ""
    # Removed a dead trailing `pass` statement; it had no effect.
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Query the deployed chaincode on one peer.

    NOTE(review): functionName from the step text is ignored and the literal
    "query" operation is always sent -- confirm this is intentional.
    """
    invokeChaincode(context, "query", containerName)
def invokeChaincode(context, functionName, containerName):
    """POST /devops/<functionName> for the current chaincodeSpec to a peer.

    Ctor arguments may come from the step table.  The response is stored on
    context.response and, when the body contains 'message', that value is
    recorded as context.transactionID.
    """
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    # Update the chaincodeSpec ctorMsg for invoke
    args = []
    if 'table' in context:
        # There is ctor arguments
        args = context.table[0].cells
    context.chaincodeSpec['ctorMsg']['function'] = functionName
    context.chaincodeSpec['ctorMsg']['args'] = args
    # Invoke the POST
    chaincodeInvocationSpec = {
        "chaincodeSpec" : context.chaincodeSpec
    }
    ipAddress = ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(ipAddress, "/devops/{0}".format(functionName))
    print("POSTing path = {0}".format(request_url))
    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeInvocationSpec))
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
    print(json.dumps(context.response.json(), indent = 4))
    # Queries may not return a 'message'; only record a transaction ID when
    # the peer provided one.
    if 'message' in resp.json():
        transactionID = context.response.json()['message']
        context.transactionID = transactionID
@then(u'I wait "{seconds}" seconds for chaincode to build')
def step_impl(context, seconds):
    """ This step takes into account the chaincodeImagesUpToDate tag, in which case the wait is reduce to some default seconds"""
    # When the scenario is tagged @chaincodeImagesUpToDate the docker images
    # already exist, so only a short settle time is needed.
    reducedWaitTime = 4
    if 'chaincodeImagesUpToDate' in context.tags:
        print("Assuming images are up to date, sleeping for {0} seconds instead of {1} in scenario {2}".format(reducedWaitTime, seconds, context.scenario.name))
        time.sleep(float(reducedWaitTime))
    else:
        time.sleep(float(seconds))
@then(u'I wait "{seconds}" seconds for transaction to be committed to block on "{containerName}"')
def step_impl(context, seconds, containerName):
    """Fetch the recorded transaction from the named peer.

    NOTE(review): despite the step name, this does not poll or sleep; it
    issues a single GET and asserts a 200 -- confirm that is intended.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    ipAddress = ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(ipAddress, "/transactions/{0}".format(context.transactionID))
    print("GETing path = {0}".format(request_url))
    resp = requests.get(request_url, headers={'Accept': 'application/json'})
    # Fixed assertion message: this request is a GET, not a POST.
    assert resp.status_code == 200, "Failed to GET %s: %s" %(request_url, resp.text)
    context.response = resp
def multiRequest(context, seconds, containerDataList, pathBuilderFunc):
    """Perform a multi request against the system"""
    # Build map of "containerName" : response
    respMap = {container.containerName:None for container in containerDataList}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in containerDataList:
        ipAddress = container.ipAddress
        request_url = buildUrl(ipAddress, pathBuilderFunc(context, container))

        # Loop unless failure or time exceeded
        # NOTE(review): this while/else contains no break, so the else clause
        # always runs once the deadline passes and the function ALWAYS raises
        # after `seconds` -- there is no success exit.  Compare with the
        # polling steps below, which break on a 200.  Looks like a missing
        # status check/break; confirm whether this helper is still used.
        while (datetime.now() < maxTime):
            print("GETing path = {0}".format(request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'})
            respMap[container.containerName] = resp
        else:
            raise Exception("Max time exceeded waiting for multiRequest with current response map = {0}".format(respMap))
@then(u'I wait up to "{seconds}" seconds for transaction to be committed to all peers')
def step_impl(context, seconds):
    """Poll every known peer for the recorded transaction until it commits.

    Protocol per peer: 404 means not yet committed (retry after 1s), 200
    means committed (move to the next peer), anything else raises.  The
    while/else's else clause raises if the overall deadline passes first.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    # Build map of "containerName" : resp.statusCode
    respMap = {container.containerName:0 for container in context.compose_containers}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in context.compose_containers:
        ipAddress = container.ipAddress
        request_url = buildUrl(ipAddress, "/transactions/{0}".format(context.transactionID))

        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("GETing path = {0}".format(request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'})
            if resp.status_code == 404:
                # Pause then try again
                respMap[container.containerName] = 404
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                # Success, continue
                respMap[container.containerName] = 200
                break
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            # Deadline expired while the current peer was still at 404.
            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
    print("Result of request to all peers = {0}".format(respMap))
    print("")
def getContainerDataValuesFromContext(context, aliases, callback):
    """For each alias, return callback(containerData) of the first container
    whose name starts with "<cwd-basename>_<alias>".

    Results are in alias order; aliases with no matching container are
    silently skipped.
    """
    assert 'compose_containers' in context, "compose_containers not found in context"
    prefix = os.path.basename(os.getcwd()) + "_"
    values = []
    for alias in aliases:
        wanted = prefix + alias
        for candidate in context.compose_containers:
            if candidate.containerName.startswith(wanted):
                values.append(callback(candidate))
                break
    return values
@then(u'I wait up to "{seconds}" seconds for transaction to be committed to peers')
def step_impl(context, seconds):
    """Poll only the peers named in the step table for the transaction.

    Same protocol as the all-peers variant above: 404 = retry after 1s,
    200 = committed, anything else raises; the while/else raises when the
    overall deadline passes.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    aliases = context.table.headings
    containerDataList = getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)

    # Build map of "containerName" : resp.statusCode
    respMap = {container.containerName:0 for container in containerDataList}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in containerDataList:
        ipAddress = container.ipAddress
        request_url = buildUrl(ipAddress, "/transactions/{0}".format(context.transactionID))

        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("GETing path = {0}".format(request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'})
            if resp.status_code == 404:
                # Pause then try again
                respMap[container.containerName] = 404
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                # Success, continue
                respMap[container.containerName] = 200
                break
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
    print("Result of request to all peers = {0}".format(respMap))
    print("")
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on all peers')
def step_impl(context, chaincodeName, functionName):
    """POST the same chaincode request to every known peer.

    Responses are collected in container order into context.responses.
    """
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    # Update the chaincodeSpec ctorMsg for invoke
    args = []
    if 'table' in context:
        # There is ctor arguments
        args = context.table[0].cells
    context.chaincodeSpec['ctorMsg']['function'] = functionName
    context.chaincodeSpec['ctorMsg']['args'] = args #context.table[0].cells if ('table' in context) else []
    # Invoke the POST
    chaincodeInvocationSpec = {
        "chaincodeSpec" : context.chaincodeSpec
    }
    responses = []
    for container in context.compose_containers:
        request_url = buildUrl(container.ipAddress, "/devops/{0}".format(functionName))
        print("POSTing path = {0}".format(request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeInvocationSpec))
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        responses.append(resp)
    context.responses = responses
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" with value "{value}" on peers')
def step_impl(context, chaincodeName, functionName, value):
    """Query the peers named in the step table, one request per peer, using
    each peer's own enrollment identity from context.peerToSecretMessage."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    assert 'peerToSecretMessage' in context, "peerToSecretMessage map not found in context"
    aliases = context.table.headings
    containerDataList = getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)

    # Update the chaincodeSpec ctorMsg for invoke
    context.chaincodeSpec['ctorMsg']['function'] = functionName
    context.chaincodeSpec['ctorMsg']['args'] = [value]
    # Invoke the POST
    # Make deep copy of chaincodeSpec as we will be changing the SecurityContext per call.
    chaincodeInvocationSpec = {
        "chaincodeSpec" : copy.deepcopy(context.chaincodeSpec)
    }
    responses = []
    for container in containerDataList:
        # Change the SecurityContext per call
        chaincodeInvocationSpec['chaincodeSpec']["secureContext"] = context.peerToSecretMessage[container.composeService]['enrollId']
        print("Container {0} enrollID = {1}".format(container.containerName, container.getEnv("OPENCHAIN_SECURITY_ENROLLID")))
        request_url = buildUrl(container.ipAddress, "/devops/{0}".format(functionName))
        print("POSTing path = {0}".format(request_url))
        # timeout guards against a hung peer; a requests timeout will propagate.
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeInvocationSpec), timeout=3)
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        print("RESULT from {0} of chaincode from peer {1}".format(functionName, container.containerName))
        print(json.dumps(resp.json(), indent = 4))
        responses.append(resp)
    context.responses = responses
@then(u'I should get a JSON response from all peers with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Assert every collected response maps `attribute` to `expectedValue`."""
    assert 'responses' in context, "responses not found in context"
    for resp in context.responses:
        body = resp.json()
        assert attribute in body, "Attribute not found in response (%s)" %(attribute)
        foundValue = body[attribute]
        assert (str(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
@then(u'I should get a JSON response from peers with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Assert every response from the tabled peers maps `attribute` to
    `expectedValue`."""
    assert 'responses' in context, "responses not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    for resp in context.responses:
        body = resp.json()
        assert attribute in body, "Attribute not found in response (%s)" %(attribute)
        foundValue = body[attribute]
        assert (str(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
@given(u'I register with CA supplying username "{userName}" and secret "{secret}" on peers')
def step_impl(context, userName, secret):
    """Register/login the same user against each peer listed in the table.

    Stores the username on context and, if a chaincodeSpec already exists,
    updates its secureContext so later calls use this identity.
    """
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"

    # Get list of IPs to login to
    aliases = context.table.headings
    ipAddressList = getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData.ipAddress)

    secretMsg = {
        "enrollId": userName,
        "enrollSecret" : secret
    }

    # Login to each container specified
    for ipAddress in ipAddressList:
        request_url = buildUrl(ipAddress, "/registrar")
        print("POSTing path = {0}".format(request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(secretMsg))
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        context.response = resp
        print("message = {0}".format(resp.json()))

    # Store the username in the context
    context.userName = userName
    # if we already have the chaincodeSpec, change secureContext
    if 'chaincodeSpec' in context:
        context.chaincodeSpec["secureContext"] = context.userName
@given(u'I use the following credentials for querying peers')
def step_impl(context):
    """Login each (peer, username, secret) row from the table against its
    peer and remember the credentials in context.peerToSecretMessage."""
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers, username, secret) not found in context"

    peerToSecretMessage = {}

    # Login to each container specified using username and secret
    for row in context.table.rows:
        peer, userName, secret = row['peer'], row['username'], row['secret']
        secretMsg = {
            "enrollId": userName,
            "enrollSecret" : secret
        }

        ipAddress = ipFromContainerNamePart(peer, context.compose_containers)
        request_url = buildUrl(ipAddress, "/registrar")
        print("POSTing to service = {0}, path = {1}".format(peer, request_url))

        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(secretMsg))
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        context.response = resp
        print("message = {0}".format(resp.json()))
        peerToSecretMessage[peer] = secretMsg
    context.peerToSecretMessage = peerToSecretMessage
@given(u'I stop peers')
def step_impl(context):
    """Stop the compose services listed in the step table and drop their
    ContainerData entries from context.compose_containers."""
    assert 'table' in context, "table (of peers) not found in context"
    assert 'compose_yaml' in context, "compose_yaml not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"

    services = context.table.headings
    # Loop through services and stop them, and remove from the container data list if stopped successfully.
    for service in services:
        context.compose_output, context.compose_error, context.compose_returncode = \
            bdd_test_util.cli_call(context, ["docker-compose", "-f", context.compose_yaml, "stop", service], expect_success=True)
        assert context.compose_returncode == 0, "docker-compose failed to stop {0}".format(service)
        #remove from the containerDataList
        context.compose_containers = [containerData for containerData in context.compose_containers if containerData.composeService != service]
    # Fixed typo in the log message ("serive" -> "service").
    print("After stopping, the container service list is = {0}".format([containerData.composeService for containerData in context.compose_containers]))
@given(u'I start peers')
def step_impl(context):
    """Start the compose services listed in the step table, then re-parse
    the compose output to refresh container data on the context."""
    assert 'table' in context, "table (of peers) not found in context"
    assert 'compose_yaml' in context, "compose_yaml not found in context"

    services = context.table.headings
    # Loop through services and start them
    for service in services:
        context.compose_output, context.compose_error, context.compose_returncode = \
            bdd_test_util.cli_call(context, ["docker-compose", "-f", context.compose_yaml, "start", service], expect_success=True)
        assert context.compose_returncode == 0, "docker-compose failed to start {0}".format(service)
    parseComposeOutput(context)
    print("After starting peers, the container service list is = {0}".format([containerData.composeService + ":" + containerData.ipAddress for containerData in context.compose_containers]))
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators with layer annotations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import pickle
from google.protobuf.any_pb2 import Any
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses
from tensorflow.python.saved_model import utils as saved_model_utils
class LayerAnnotationsCollectionNames(object):
  """Names for the graph collections containing the layer annotations."""

  UNPROCESSED_FEATURES = 'layer_annotations/unprocessed_features'
  # NOTE(review): 'annotatons' spelling is preserved deliberately -- the value
  # is a persisted collection key that external readers may rely on.
  PROCESSED_FEATURES = 'layer_annotatons/processed_features'
  FEATURE_COLUMNS = 'layer_annotations/feature_columns'

  @classmethod
  def keys(cls, collection_name):
    """Returns the name of the collection holding keys for collection_name."""
    return collection_name + '/keys'

  @classmethod
  def values(cls, collection_name):
    """Returns the name of the collection holding values for collection_name."""
    return collection_name + '/values'
def serialize_feature_column(feature_column):
  """Pickles a feature column, dropping unpicklable embedding initializers.

  NOTE(review): relies on the private feature_column_lib._EmbeddingColumn
  API and on _EmbeddingColumn being a namedtuple (_asdict) -- verify against
  the TensorFlow version in use.
  """
  if isinstance(feature_column, feature_column_lib._EmbeddingColumn):  # pylint: disable=protected-access
    # We can't pickle nested functions, and we don't need the value of
    # layer_creator in most cases anyway, so just discard its value.
    args = feature_column._asdict()
    args['layer_creator'] = None
    temp = type(feature_column)(**args)
    return pickle.dumps(temp)
  return pickle.dumps(feature_column)
def _to_any_wrapped_tensor_info(tensor):
  """Converts a `Tensor` to a `TensorInfo` wrapped in a proto `Any`."""
  wrapper = Any()
  wrapper.Pack(saved_model_utils.build_tensor_info(tensor))
  return wrapper
def make_input_layer_with_layer_annotations(original_input_layer):
  """Make an input_layer replacement function that adds layer annotations."""

  def input_layer_with_layer_annotations(features,
                                         feature_columns,
                                         weight_collections=None,
                                         trainable=True,
                                         cols_to_vars=None,
                                         scope=None,
                                         cols_to_output_tensors=None,
                                         from_template=False):
    """Returns a dense `Tensor` as input layer based on given `feature_columns`.

    Generally a single example in training data is described with
    FeatureColumns. At the first layer of the model, this column oriented
    data should be converted to a single `Tensor`.

    This is like tf.feature_column.input_layer, except with added
    Integrated-Gradient annotations.

    Args:
      features: A mapping from key to tensors. `_FeatureColumn`s look up via
        these keys. For example `numeric_column('price')` will look at 'price'
        key in this dict. Values can be a `SparseTensor` or a `Tensor` depends
        on corresponding `_FeatureColumn`.
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `_DenseColumn` such as `numeric_column`, `embedding_column`,
        `bucketized_column`, `indicator_column`. If you have categorical
        features, you can wrap them with an `embedding_column` or
        `indicator_column`.
      weight_collections: A list of collection names to which the Variable will
        be added. Note that variables will also be added to collections
        `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      cols_to_vars: If not `None`, must be a dictionary that will be filled with
        a mapping from `_FeatureColumn` to list of `Variable`s. If a column
        creates no variables, its value will be an empty list.
      scope: A name or variable scope to use.
      cols_to_output_tensors: If not `None`, must be a dictionary that will be
        filled with a mapping from '_FeatureColumn' to the associated output
        `Tensor`s.
      from_template: True if the method is being instantiated from a
        `make_template`.

    Returns:
      A `Tensor` which represents input layer of a model. Its shape
      is (batch_size, first_layer_dimension) and its dtype is `float32`.
      first_layer_dimension is determined based on given `feature_columns`.

    Raises:
      ValueError: features and feature_columns have different lengths.
    """
    # Collect per-column output tensors locally so they can be annotated
    # below regardless of whether the caller asked for them.
    local_cols_to_output_tensors = {}
    input_layer = original_input_layer(
        features=features,
        feature_columns=feature_columns,
        weight_collections=weight_collections,
        trainable=trainable,
        cols_to_vars=cols_to_vars,
        scope=scope,
        cols_to_output_tensors=local_cols_to_output_tensors,
        from_template=from_template)

    if cols_to_output_tensors is not None:
      # Bug fix: the original code rebound the *local name*
      # (`cols_to_output_tensors = local_cols_to_output_tensors`), so the
      # caller-supplied dictionary was never populated despite the documented
      # contract. Copy the entries into the caller's mapping instead.
      cols_to_output_tensors.update(local_cols_to_output_tensors)

    # Annotate features.
    # These are the parsed Tensors, before embedding.

    # Only annotate features used by FeatureColumns.
    # We figure which ones are used by FeatureColumns by creating a parsing
    # spec and looking at the keys.
    spec = feature_column_lib.make_parse_example_spec(feature_columns)
    for key in spec.keys():
      tensor = ops.convert_to_tensor_or_indexed_slices(features[key])
      ops.add_to_collection(
          LayerAnnotationsCollectionNames.keys(
              LayerAnnotationsCollectionNames.UNPROCESSED_FEATURES), key)
      ops.add_to_collection(
          LayerAnnotationsCollectionNames.values(
              LayerAnnotationsCollectionNames.UNPROCESSED_FEATURES),
          _to_any_wrapped_tensor_info(tensor))

    # Annotate feature columns.
    for column in feature_columns:
      # TODO(cyfoo): Find a better way to serialize and deserialize
      # _FeatureColumn.
      ops.add_to_collection(LayerAnnotationsCollectionNames.FEATURE_COLUMNS,
                            serialize_feature_column(column))

    # Annotate the post-embedding (processed) tensors, keyed by column name.
    for column, tensor in local_cols_to_output_tensors.items():
      ops.add_to_collection(
          LayerAnnotationsCollectionNames.keys(
              LayerAnnotationsCollectionNames.PROCESSED_FEATURES), column.name)
      ops.add_to_collection(
          LayerAnnotationsCollectionNames.values(
              LayerAnnotationsCollectionNames.PROCESSED_FEATURES),
          _to_any_wrapped_tensor_info(tensor))

    return input_layer

  return input_layer_with_layer_annotations
@contextlib.contextmanager
def _monkey_patch(module, function, replacement):
old_function = getattr(module, function)
setattr(module, function, replacement)
yield
setattr(module, function, old_function)
def DNNClassifierWithLayerAnnotations(  # pylint: disable=invalid-name
    hidden_units,
    feature_columns,
    model_dir=None,
    n_classes=2,
    weight_column=None,
    label_vocabulary=None,
    optimizer='Adagrad',
    activation_fn=nn.relu,
    dropout=None,
    input_layer_partitioner=None,
    config=None,
    warm_start_from=None,
    loss_reduction=losses.Reduction.SUM):
  """A classifier for TensorFlow DNN models with layer annotations.

  This classifier is functionally identical to estimator.DNNClassifier as far
  as training and evaluating models is concerned. The key difference is that
  this classifier adds additional layer annotations, which can be used for
  computing Integrated Gradients.

  Integrated Gradients is a method for attributing a classifier's predictions
  to its input features (https://arxiv.org/pdf/1703.01365.pdf). Given an input
  instance, the method assigns attribution scores to individual features in
  proportion to the feature's importance to the classifier's prediction.

  See estimator.DNNClassifier for example code for training and evaluating
  models using this classifier.

  This classifier is checkpoint-compatible with estimator.DNNClassifier and
  therefore the following should work seamlessly:

  # Instantiate ordinary estimator as usual.
  estimator = tf.estimator.DNNClassifier(
    config, feature_columns, hidden_units, ...)

  # Train estimator, export checkpoint.
  tf.estimator.train_and_evaluate(estimator, ...)

  # Instantiate estimator with annotations with the same configuration as the
  # ordinary estimator.
  estimator_with_annotations = (
    tf.contrib.estimator.DNNClassifierWithLayerAnnotations(
      config, feature_columns, hidden_units, ...))

  # Call export_savedmodel with the same arguments as the ordinary estimator,
  # using the checkpoint produced for the ordinary estimator.
  estimator_with_annotations.export_saved_model(
    export_dir_base, serving_input_receiver, ...
    checkpoint_path='/path/to/ordinary/estimator/checkpoint/model.ckpt-1234')

  Args:
    hidden_units: Iterable of number hidden units per layer. All layers are
      fully connected. Ex. `[64, 32]` means first layer has 64 nodes and second
      one has 32.
    feature_columns: An iterable containing all the feature columns used by the
      model. All items in the set should be instances of classes derived from
      `_FeatureColumn`.
    model_dir: Directory to save model parameters, graph and etc. This can also
      be used to load checkpoints from the directory into an estimator to
      continue training a previously saved model.
    n_classes: Number of label classes. Defaults to 2, namely binary
      classification. Must be > 1.
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example. If it is a string, it is
      used as a key to fetch weight tensor from the `features`. If it is a
      `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then
      weight_column.normalizer_fn is applied on it to get weight tensor.
    label_vocabulary: A list of strings represents possible label values. If
      given, labels must be string type and have any value in
      `label_vocabulary`. If it is not given, that means labels are already
      encoded as integer or float within [0, 1] for `n_classes=2` and encoded
      as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 . Also
      there will be errors if vocabulary is not provided and labels are string.
    optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
      to Adagrad optimizer.
    activation_fn: Activation function applied to each layer. If `None`, will
      use `tf.nn.relu`.
    dropout: When not `None`, the probability we will drop out a given
      coordinate.
    input_layer_partitioner: Optional. Partitioner for input layer. Defaults to
      `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    config: `RunConfig` object to configure the runtime settings.
    warm_start_from: A string filepath to a checkpoint to warm-start from, or a
      `WarmStartSettings` object to fully configure warm-starting. If the
      string filepath is provided instead of a `WarmStartSettings`, then all
      weights are warm-started, and it is assumed that vocabularies and Tensor
      names are unchanged.
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
      to reduce training loss over batch. Defaults to `SUM`.

  Returns:
    DNNClassifier with layer annotations.
  """

  # Build an ordinary DNNClassifier; its model_fn is wrapped below so that
  # the checkpoint layout (and therefore checkpoint compatibility) is
  # unchanged.
  original = dnn.DNNClassifier(
      hidden_units=hidden_units,
      feature_columns=feature_columns,
      model_dir=model_dir,
      n_classes=n_classes,
      weight_column=weight_column,
      label_vocabulary=label_vocabulary,
      optimizer=optimizer,
      activation_fn=activation_fn,
      dropout=dropout,
      input_layer_partitioner=input_layer_partitioner,
      config=config,
      warm_start_from=warm_start_from,
      loss_reduction=loss_reduction)

  def _model_fn(features, labels, mode, config):
    # While the original model_fn runs, swap feature_column's internal
    # input-layer builder for one that records layer annotations in graph
    # collections; the patch is reverted as soon as the model_fn returns.
    with _monkey_patch(
        feature_column_lib, '_internal_input_layer',
        make_input_layer_with_layer_annotations(
            feature_column_lib._internal_input_layer)):  # pylint: disable=protected-access
      return original.model_fn(features, labels, mode, config)

  return estimator.Estimator(
      model_fn=_model_fn,
      model_dir=model_dir,
      config=config,
      warm_start_from=warm_start_from)
def DNNRegressorWithLayerAnnotations(  # pylint: disable=invalid-name
    hidden_units,
    feature_columns,
    model_dir=None,
    label_dimension=1,
    weight_column=None,
    optimizer='Adagrad',
    activation_fn=nn.relu,
    dropout=None,
    input_layer_partitioner=None,
    config=None,
    warm_start_from=None,
    loss_reduction=losses.Reduction.SUM,
):
  """A regressor for TensorFlow DNN models with layer annotations.

  This regressor is functionally identical to estimator.DNNRegressor as far as
  training and evaluating models is concerned. The key difference is that this
  regressor adds additional layer annotations, which can be used for computing
  Integrated Gradients.

  Integrated Gradients is a method for attributing a regressor's predictions
  to its input features (https://arxiv.org/pdf/1703.01365.pdf). Given an input
  instance, the method assigns attribution scores to individual features in
  proportion to the feature's importance to the regressor's prediction.

  See estimator.DNNRegressor for example code for training and evaluating
  models using this regressor.

  This regressor is checkpoint-compatible with estimator.DNNRegressor and
  therefore the following should work seamlessly:

  # Instantiate ordinary estimator as usual.
  estimator = tf.estimator.DNNRegressor(
    config, feature_columns, hidden_units, ...)

  # Train estimator, export checkpoint.
  tf.estimator.train_and_evaluate(estimator, ...)

  # Instantiate estimator with annotations with the same configuration as the
  # ordinary estimator.
  estimator_with_annotations = (
    tf.contrib.estimator.DNNRegressorWithLayerAnnotations(
      config, feature_columns, hidden_units, ...))

  # Call export_savedmodel with the same arguments as the ordinary estimator,
  # using the checkpoint produced for the ordinary estimator.
  estimator_with_annotations.export_saved_model(
    export_dir_base, serving_input_receiver, ...
    checkpoint_path='/path/to/ordinary/estimator/checkpoint/model.ckpt-1234')

  Args:
    hidden_units: Iterable of number hidden units per layer. All layers are
      fully connected. Ex. `[64, 32]` means first layer has 64 nodes and second
      one has 32.
    feature_columns: An iterable containing all the feature columns used by the
      model. All items in the set should be instances of classes derived from
      `_FeatureColumn`.
    model_dir: Directory to save model parameters, graph and etc. This can also
      be used to load checkpoints from the directory into an estimator to
      continue training a previously saved model.
    label_dimension: Number of regression targets per example. This is the size
      of the last dimension of the labels and logits `Tensor` objects
      (typically, these have shape `[batch_size, label_dimension]`).
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example. If it is a string, it is
      used as a key to fetch weight tensor from the `features`. If it is a
      `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then
      weight_column.normalizer_fn is applied on it to get weight tensor.
    optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
      to Adagrad optimizer.
    activation_fn: Activation function applied to each layer. If `None`, will
      use `tf.nn.relu`.
    dropout: When not `None`, the probability we will drop out a given
      coordinate.
    input_layer_partitioner: Optional. Partitioner for input layer. Defaults to
      `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    config: `RunConfig` object to configure the runtime settings.
    warm_start_from: A string filepath to a checkpoint to warm-start from, or a
      `WarmStartSettings` object to fully configure warm-starting. If the
      string filepath is provided instead of a `WarmStartSettings`, then all
      weights are warm-started, and it is assumed that vocabularies and Tensor
      names are unchanged.
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
      to reduce training loss over batch. Defaults to `SUM`.

  Returns:
    DNNRegressor with layer annotations.
  """

  # Build an ordinary DNNRegressor; its model_fn is wrapped below so that
  # the checkpoint layout (and therefore checkpoint compatibility) is
  # unchanged.
  original = dnn.DNNRegressor(
      hidden_units=hidden_units,
      feature_columns=feature_columns,
      model_dir=model_dir,
      label_dimension=label_dimension,
      weight_column=weight_column,
      optimizer=optimizer,
      activation_fn=activation_fn,
      dropout=dropout,
      input_layer_partitioner=input_layer_partitioner,
      config=config,
      warm_start_from=warm_start_from,
      loss_reduction=loss_reduction,
  )

  def _model_fn(features, labels, mode, config):
    # While the original model_fn runs, swap feature_column's internal
    # input-layer builder for one that records layer annotations in graph
    # collections; the patch is reverted as soon as the model_fn returns.
    with _monkey_patch(
        feature_column_lib, '_internal_input_layer',
        make_input_layer_with_layer_annotations(
            feature_column_lib._internal_input_layer)):  # pylint: disable=protected-access
      return original.model_fn(features, labels, mode, config)

  return estimator.Estimator(
      model_fn=_model_fn,
      model_dir=model_dir,
      config=config,
      warm_start_from=warm_start_from)
| |
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
from io import BytesIO
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .status_codes import codes
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.filepost import encode_multipart_formdata
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .utils import (
stream_untransfer, guess_filename, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 10 * 1024
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
    """Mixin providing URL-path and body encoding helpers for requests."""

    @property
    def path_url(self):
        """Build the path URL to use."""

        url = []

        p = urlsplit(self.url)

        path = p.path
        if not path:
            # A bare domain has no path component; requests go to '/'.
            path = '/'

        url.append(path)

        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """

        # Strings/bytes and file-like objects are passed through untouched;
        # only mapping/iterable data is form-encoded.
        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, 'read'):
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # A scalar value is treated as a one-element list so that
                # multi-valued keys (k=[v1, v2]) and single values share
                # one code path.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        # On Python 2, `str` here is unicode (from .compat);
                        # encode to UTF-8 bytes before urlencoding.
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.

        Returns a ``(body, content_type)`` tuple, or ``None`` when there is
        nothing to encode as multipart.
        """
        if (not files) or isinstance(data, str):
            return None

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Regular form fields come first; list values expand into repeated
        # fields under the same name.
        for field, val in fields:
            if isinstance(val, list):
                for v in val:
                    new_fields.append((field, builtin_str(v)))
            else:
                new_fields.append((field, builtin_str(val)))

        for (k, v) in files:
            # support for explicit filename
            # A file value may be a bare file object, a (filename, fileobj)
            # pair, or a (filename, fileobj, content_type) triple.
            ft = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                else:
                    fn, fp, ft = v
            else:
                fn = guess_filename(v) or k
                fp = v
            if isinstance(fp, str):
                fp = StringIO(fp)
            if isinstance(fp, bytes):
                fp = BytesIO(fp)

            if ft:
                new_v = (fn, fp.read(), ft)
            else:
                new_v = (fn, fp.read())
            new_fields.append((k, new_v))

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Mixin providing hook (callback) registration for request objects.

    ``self.hooks`` is expected to be a mapping of event name to a list of
    callables.
    """

    def register_hook(self, event, hook):
        """Properly register a hook.

        ``hook`` may be a single callable or an iterable of callables;
        non-callable entries in an iterable are silently skipped.
        """
        # Bug fix: `isinstance(hook, collections.Callable)` breaks on
        # Python 3.10+, where the ABC aliases were removed from
        # `collections` (they live in `collections.abc`). The builtin
        # `callable()` is equivalent here and works on every version.
        if callable(hook):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            self.hooks[event].extend(h for h in hook if callable(h))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.

        Returns True if the hook existed, False if not.
        """
        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            # list.remove raises ValueError when the hook was never
            # registered for this event.
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is
    sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(self,
                 method=None,
                 url=None,
                 headers=None,
                 files=None,
                 data=None,
                 params=None,
                 auth=None,
                 cookies=None,
                 hooks=None):

        # Default empty containers for container params. Bug fix: the
        # original signature used mutable defaults (`data=dict()`,
        # `params=dict()`), which are shared across all calls; `None`
        # sentinels avoid that pitfall.
        data = [] if data is None else data
        files = [] if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks

        self.hooks = default_hooks()
        for (k, v) in list(hooks.items()):
            self.register_hook(event=k, hook=v)
        # Bug fix: the original ended __init__ with `self.hooks = hooks`,
        # clobbering the default_hooks() dict (and everything registered
        # above) with the raw user-supplied mapping. The registered hook
        # structure is kept instead.

        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [%s]>' % (self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        p = PreparedRequest()

        p.prepare_method(self.method)
        p.prepare_url(self.url, self.params)
        p.prepare_headers(self.headers)
        p.prepare_cookies(self.cookies)
        p.prepare_body(self.data, self.files)
        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.
        p.prepare_auth(self.auth)

        return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Generated from either a :class:`Request <Request>` object or manually.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> r = req.prepare()
      <PreparedRequest [GET]>

      >>> s = requests.Session()
      >>> s.send(r)
      <Response [200]>
    """

    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()

    def __repr__(self):
        return '<PreparedRequest [%s]>' % (self.method)

    def prepare_method(self, method):
        """Prepares the given HTTP method."""
        self.method = method
        if self.method is not None:
            # HTTP method names are case-sensitive per RFC; requests always
            # sends them uppercased.
            self.method = self.method.upper()

    def prepare_url(self, url, params):
        """Prepares the given HTTP URL.

        Encodes `params` into the query string and IDNA-encodes the host so
        unicode domain names work on the wire.
        """
        #: Accept objects that have string representations.
        try:
            url = unicode(url)
        except NameError:
            # We're on Python 3.
            url = str(url)
        except UnicodeDecodeError:
            pass

        # Support for unicode domain names and paths.
        scheme, netloc, path, _params, query, fragment = urlparse(url)

        if not scheme:
            raise MissingSchema("Invalid URL %r: No schema supplied" % url)

        try:
            # IDNA-encode the host (punycode) so non-ASCII domains are valid.
            netloc = netloc.encode('idna').decode('utf-8')
        except UnicodeError:
            raise InvalidURL('URL has an invalid label.')

        # Bare domains aren't valid URLs.
        if not path:
            path = '/'

        if is_py2:
            # On Python 2, urlunparse below needs byte strings; encode each
            # component that is still unicode.
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')

            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')

            if isinstance(path, str):
                path = path.encode('utf-8')

            if isinstance(_params, str):
                _params = _params.encode('utf-8')

            if isinstance(query, str):
                query = query.encode('utf-8')

            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')

        enc_params = self._encode_params(params)
        if enc_params:
            # Append encoded params to any query string already in the URL.
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params

        url = requote_uri(urlunparse([scheme, netloc, path, _params, query, fragment]))
        self.url = url

    def prepare_headers(self, headers):
        """Prepares the given HTTP headers."""

        if headers:
            self.headers = CaseInsensitiveDict(headers)
        else:
            self.headers = CaseInsensitiveDict()

    def prepare_body(self, data, files):
        """Prepares the given HTTP body data.

        Sets `self.body` and the Content-Length / Transfer-Encoding /
        Content-Type headers according to whether `data` is a stream, a
        multipart upload, or form/raw data.
        """

        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None
        length = None
        is_stream = False

        # A "stream" is any non-string, non-dict iterable (e.g. a generator
        # or file object): it is sent as-is, chunked if its length is unknown.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, basestring),
            not isinstance(data, dict)
        ])

        try:
            length = super_len(data)
        except (TypeError, AttributeError):
            length = False

        if is_stream:
            body = data

            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')

            if length:
                # NOTE(review): length is assigned as an int here, unlike the
                # str() conversions below — presumably coerced downstream.
                self.headers['Content-Length'] = length
            else:
                self.headers['Transfer-Encoding'] = 'chunked'
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
                        # Raw strings / file-likes: don't guess a content type.
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'

            self.headers['Content-Length'] = '0'
            if hasattr(body, 'seek') and hasattr(body, 'tell'):
                # Seekable body: measure its length by seeking to the end,
                # then rewind so it can still be read for sending.
                body.seek(0, 2)
                self.headers['Content-Length'] = str(body.tell())
                body.seek(0, 0)
            elif body is not None:
                self.headers['Content-Length'] = str(len(body))

        # Add content-type if it wasn't explicitly provided.
        if (content_type) and (not 'content-type' in self.headers):
            self.headers['Content-Type'] = content_type

        self.body = body

    def prepare_auth(self, auth):
        """Prepares the given HTTP auth data."""
        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.

        Builds a Cookie header from `cookies` unless one is already present.
        """

        if isinstance(cookies, cookielib.CookieJar):
            cookies = cookies
        else:
            cookies = cookiejar_from_dict(cookies)

        if 'cookie' not in self.headers:
            cookie_header = get_cookie_header(cookies, self)
            if cookie_header is not None:
                self.headers['Cookie'] = cookie_header
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """

    def __init__(self):
        super(Response, self).__init__()

        # _content is False until the body is read (None means "no body").
        self._content = False
        self._content_consumed = False

        #: Integer Code of responded HTTP Status.
        self.status_code = None

        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()

        #: File-like object representation of response (for advanced usage).
        #: Requires that ``stream=True`` on the request.
        # This requirement does not apply for use internally to Requests.
        self.raw = None

        #: Final URL location of Response.
        self.url = None

        #: Encoding to decode with when accessing r.text.
        self.encoding = None

        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []

        #: Textual reason of responded HTTP Status, e.g. "Not Found".
        self.reason = None

        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})

    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)

    def __bool__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok

    def __nonzero__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        # Python 2 truth-value protocol; delegates to the same check.
        return self.ok

    @property
    def ok(self):
        """True when the status code does not signal an HTTP error."""
        try:
            self.raise_for_status()
        except RequestException:
            return False
        return True

    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the lovely Charade library
        (Thanks, Ian!)."""
        return chardet.detect(self.content)['encoding']

    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. This avoids reading the content
        at once into memory for large responses. The chunk size is the number
        of bytes it should read into memory. This is not necessarily the
        length of each item returned as decoding can take place.
        """
        if self._content_consumed:
            # simulate reading small chunks of the content
            return iter_slices(self._content, chunk_size)

        def generate():
            while 1:
                chunk = self.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk
            self._content_consumed = True

        gen = stream_untransfer(generate(), self)

        if decode_unicode:
            gen = stream_decode_response_unicode(gen, self)

        return gen

    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
        """Iterates over the response data, one line at a time. This
        avoids reading the content at once into memory for large
        responses.
        """

        pending = None

        for chunk in self.iter_content(
                chunk_size=chunk_size,
                decode_unicode=decode_unicode):

            if pending is not None:
                chunk = pending + chunk
            lines = chunk.splitlines()

            # If the chunk does not end on a line boundary, keep the last
            # (partial) line and prepend it to the next chunk.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None

            for line in lines:
                yield line

        if pending is not None:
            yield pending

    @property
    def content(self):
        """Content of the response, in bytes."""

        if self._content is False:
            # Read the contents.
            try:
                if self._content_consumed:
                    raise RuntimeError(
                        'The content for this response was already consumed')

                # Bug fix: the original used `self.status_code is 0`, an
                # identity comparison with an int literal that relies on
                # CPython small-int caching and is a SyntaxWarning on 3.8+.
                if self.status_code == 0:
                    self._content = None
                else:
                    self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()

            except AttributeError:
                # self.raw was never set (no underlying connection).
                self._content = None

        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content

    @property
    def text(self):
        """Content of the response, in unicode.

        if Response.encoding is None and chardet module is available, encoding
        will be guessed.
        """

        # Try charset from content-type
        content = None
        encoding = self.encoding

        if not self.content:
            return str('')

        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding

        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')

        return content

    def json(self):
        """Returns the json-encoded content of a response, if any."""

        if not self.encoding and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                return json.loads(self.content.decode(encoding))
        return json.loads(self.text or self.content)

    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""

        # Bug fix: use .get so a response without a Link header yields an
        # empty dict instead of raising KeyError (the docstring promises
        # "if any").
        header = self.headers.get('link')

        # l = MultiDict()
        l = {}

        if header:
            links = parse_header_links(header)

            for link in links:
                key = link.get('rel') or link.get('url')
                l[key] = link

        return l

    def raise_for_status(self):
        """Raises stored :class:`HTTPError` or :class:`URLError`, if one occurred."""

        http_error_msg = ''

        if 400 <= self.status_code < 500:
            http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)

        elif 500 <= self.status_code < 600:
            http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)

        if http_error_msg:
            http_error = HTTPError(http_error_msg)
            http_error.response = self
            raise http_error

    def close(self):
        """Releases the underlying urllib3 connection back to the pool."""
        return self.raw.release_conn()
| |
'''
Copyright (c) 2014, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an
agency of the United States Government. Neither the United States
Government nor the United States Department of Energy, nor Battelle,
nor any of their employees, nor any jurisdiction or organization
that has cooperated in the development of these materials, makes
any warranty, express or implied, or assumes any legal liability
or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed,
or represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or
service by trade name, trademark, manufacturer, or otherwise does
not necessarily constitute or imply its endorsement, recommendation,
or favoring by the United States Government or any agency thereof,
or Battelle Memorial Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the
United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
'''
import datetime
from datetime import timedelta as td
import logging
from volttron.platform.agent.driven import Results, AbstractDrivenAgent
from zmq.utils import jsonapi
from volttron.platform.agent import utils
from volttron.platform.messaging import headers as headers_mod, topics
__version__ = '3.1'
# Diagnostic display names; used as key prefixes for every row written to
# the analysis results table.
ECON1 = 'Temperature Sensor Dx'
ECON2 = 'Not Economizing When Unit Should Dx'
ECON3 = 'Economizing When Unit Should Not Dx'
ECON4 = 'Excess Outdoor-air Intake Dx'
ECON5 = 'Insufficient Outdoor-air Intake Dx'
# Key suffixes appended to a diagnostic name when building table-row keys.
DX = '/diagnostic message'
EI = '/energy impact'
DATA = '/data/'
# Point-name suffixes used when publishing per-sample data rows.
RAT = 'ReturnAirTemperature'
MAT = 'MixedAirTemperature'
OAT = 'OutsideAirTemperature'
OAD = 'OutsideDamperSignal'
CC = 'CoolCall'
FS = 'SupplyFanSpeed'
EC = 'EconomizerCondition'
ST = 'State'
class Application(AbstractDrivenAgent):
    '''Application to detect and correct operational problems for AHUs/RTUs.

    This application uses metered data from zones served by an AHU/RTU
    to detect operational problems and where applicable correct these problems
    by modifying set points. When auto-correction cannot be applied then
    a message detailing the diagnostic results will be made available to
    the building operator.
    '''
    # Diagnostic Point Names (Must match OpenEIS data-type names)
    def __init__(self, economizer_type='DDB', econ_hl_temp=65.0,
                 device_type='AHU', temp_deadband=1.0,
                 data_window=30, no_required_data=20,
                 open_damper_time=5,
                 low_supply_fan_threshold=20.0,
                 mat_low_threshold=50.0, mat_high_threshold=90.0,
                 oat_low_threshold=30.0, oat_high_threshold=100.0,
                 rat_low_threshold=50.0, rat_high_threshold=90.0,
                 temp_difference_threshold=4.0, oat_mat_check=5.0,
                 open_damper_threshold=90.0, oaf_economizing_threshold=25.0,
                 oaf_temperature_threshold=4.0,
                 cooling_enabled_threshold=5.0,
                 minimum_damper_setpoint=15.0, excess_damper_threshold=20.0,
                 excess_oaf_threshold=20.0, desired_oaf=10.0,
                 ventilation_oaf_threshold=5.0,
                 insufficient_damper_threshold=15.0,
                 temp_damper_threshold=90.0, rated_cfm=6000.0, eer=10.0,
                 **kwargs):
        '''Store configurable thresholds and build the five sub-diagnostics.

        Required kwargs (supplied by the driven-agent configuration):
        ``device`` (a dict carrying ``analysis_name``), plus the point-name
        strings ``fan_status``, ``oa_temp``, ``ra_temp``, ``ma_temp``,
        ``damper_signal``, ``cool_call`` and ``fan_speedcmd`` used to match
        incoming data keys in run().
        '''
        # initialize user configurable parameters.
        # super(Application, self).__init__(**kwargs)
        # NOTE: diagnostic state is kept on the class (analysis name,
        # pre-requisite message buffers) — assumes one Application instance
        # per process.
        Application.analysis = kwargs['device']['analysis_name']
        self.fan_status_name = kwargs['fan_status']
        self.oat_name = kwargs['oa_temp']
        self.rat_name = kwargs['ra_temp']
        self.mat_name = kwargs['ma_temp']
        self.oad_sig_name = kwargs['damper_signal']
        self.cool_call_name = kwargs['cool_call']
        self.fan_sp_name = kwargs['fan_speedcmd']
        data_window = int(data_window)
        open_damper_time = int(open_damper_time)
        self.device_type = device_type.lower()
        self.economizer_type = economizer_type.lower()
        # NOTE(review): econ_hl_temp is only stored for the 'hl' economizer
        # type, yet run() reads self.econ_hl_temp for every non-'ddb' type —
        # any other value would raise AttributeError there. TODO confirm
        # configuration restricts economizer_type to 'DDB'/'HL'.
        if self.economizer_type == 'hl':
            self.econ_hl_temp = float(econ_hl_temp)
        Application.pre_requiste_messages = []
        Application.pre_msg_time = []
        self.oaf_temp_thr = float(oaf_temperature_threshold)
        # Application thresholds (Configurable)
        self.data_window = float(data_window)
        no_required_data = int(no_required_data)
        self.mat_low_threshold = float(mat_low_threshold)
        self.mat_high_threshold = float(mat_high_threshold)
        self.oat_low_threshold = float(oat_low_threshold)
        self.oat_high_threshold = float(oat_high_threshold)
        self.rat_low_threshold = float(rat_low_threshold)
        self.rat_high_threshold = float(rat_high_threshold)
        self.temp_deadband = float(temp_deadband)
        self.low_supply_fan_threshold = float(low_supply_fan_threshold)
        self.cooling_enabled_threshold = float(cooling_enabled_threshold)
        cfm = float(rated_cfm)
        eer = float(eer)
        # Pre-requisite messages
        self.pre_msg1 = ('Supply fan is off, current data will '
                         'not be used for diagnostics.')
        self.pre_msg2 = ('Supply fan status data is missing '
                         'from input(device or csv), could '
                         'not verify system was ON.')
        self.pre_msg3 = ('Missing required data for diagnostic: '
                         'Check BACnet configuration or CSV file '
                         'input for outside-air temperature.')
        self.pre_msg4 = ('Missing required data for diagnostic: '
                         'Check BACnet configuration or CSV file '
                         'input for return-air temperature.')
        self.pre_msg5 = ('Missing required data for diagnostic: '
                         'Check BACnet configuration or CSV '
                         'file input for mixed-air temperature.')
        self.pre_msg6 = ('Missing required data for diagnostic: '
                         'Check BACnet configuration or CSV '
                         'file input for damper signal.')
        self.pre_msg7 = ''.join(['Missing required data for diagnostic: ',
                                 'Check BACnet configuration or CSV file '
                                 'input for cooling call (AHU cooling coil,'
                                 'RTU cooling call or compressor command).'])
        self.pre_msg8 = ('Outside-air temperature is outside high/low '
                         'operating limits, check the functionality of '
                         'the temperature sensor.')
        self.pre_msg9 = ('Return-air temperature is outside high/low '
                         'operating limits, check the functionality of '
                         'the temperature sensor.')
        self.pre_msg10 = ('Mixed-air temperature is outside high/low '
                          'operating limits, check the functionality '
                          'of the temperature sensor.')
        # Build the five sub-diagnostics that run() dispatches to.
        self.econ1 = TempSensorDx(data_window, no_required_data,
                                  temp_difference_threshold, open_damper_time,
                                  oat_mat_check, temp_damper_threshold)
        self.econ2 = EconCorrectlyOn(oaf_economizing_threshold,
                                     open_damper_threshold,
                                     data_window, no_required_data, cfm, eer)
        self.econ3 = EconCorrectlyOff(data_window, no_required_data,
                                      minimum_damper_setpoint,
                                      excess_damper_threshold,
                                      cooling_enabled_threshold,
                                      desired_oaf, cfm, eer)
        self.econ4 = ExcessOA(data_window, no_required_data,
                              excess_oaf_threshold,
                              minimum_damper_setpoint,
                              excess_damper_threshold,
                              desired_oaf, cfm, eer)
        self.econ5 = InsufficientOA(data_window, no_required_data,
                                    ventilation_oaf_threshold,
                                    minimum_damper_setpoint,
                                    insufficient_damper_threshold,
                                    desired_oaf)

    def run(self, cur_time, points):
        '''Main run method that is called by the DrivenBaseClass.

        run receives a dictionary of data 'points' and an associated timestamp
        for the data cur_time'. run then passes the appropriate data to
        each diagnostic when calling
        the diagnostic message.
        '''
        device_dict = {}
        dx_result = Results()
        # Match incoming point names case-insensitively.
        for key, value in points.items():
            device_dict[key.lower()] = value
        # Verify the supply fan is running; otherwise record a pre-requisite
        # message and skip all diagnostics for this sample.
        fan_check = False
        for key, value in device_dict.items():
            if key.startswith(self.fan_status_name):
                if value is not None and not int(value):
                    Application.pre_requiste_messages.append(self.pre_msg1)
                    dx_result = self.pre_message(dx_result, cur_time)
                    return dx_result
                elif value is not None:
                    fan_check = True
        # Fall back to the fan speed command if no status point was found.
        if not fan_check and self.fan_sp_name is not None:
            for key, value in device_dict.items():
                if key.startswith(self.fan_sp_name):
                    fan_check = True
                    if value < self.low_supply_fan_threshold:
                        Application.pre_requiste_messages.append(self.pre_msg1)
                        dx_result = self.pre_message(dx_result, cur_time)
                        return dx_result
        if not fan_check:
            Application.pre_requiste_messages.append(self.pre_msg2)
            dx_result = self.pre_message(dx_result, cur_time)
            return dx_result
        # Collect all values for each configured point prefix.
        damper_data = []
        oat_data = []
        mat_data = []
        rat_data = []
        cooling_data = []
        fan_sp_data = []
        for key, value in device_dict.items():
            if key.startswith(self.oad_sig_name) and value is not None:
                damper_data.append(value)
            elif key.startswith(self.oat_name) and value is not None:
                oat_data.append(value)
            elif key.startswith(self.mat_name) and value is not None:
                mat_data.append(value)
            elif key.startswith(self.rat_name) and value is not None:
                rat_data.append(value)
            elif key.startswith(self.cool_call_name) and value is not None:
                cooling_data.append(value)
            # NOTE(review): unlike the fan-check loop above, this does not
            # guard against self.fan_sp_name being None; startswith(None)
            # would raise TypeError — TODO confirm config always supplies a
            # string for 'fan_speedcmd'.
            elif key.startswith(self.fan_sp_name) and value is not None:
                fan_sp_data.append(value)
        # Record a pre-requisite message for each missing required point.
        if not oat_data:
            Application.pre_requiste_messages.append(self.pre_msg3)
        if not rat_data:
            Application.pre_requiste_messages.append(self.pre_msg4)
        if not mat_data:
            Application.pre_requiste_messages.append(self.pre_msg5)
        if not damper_data:
            Application.pre_requiste_messages.append(self.pre_msg6)
        if not cooling_data:
            Application.pre_requiste_messages.append(self.pre_msg7)
        if not (oat_data and rat_data and mat_data and
                damper_data and cooling_data):
            dx_result = self.pre_message(dx_result, cur_time)
            return dx_result
        # Average multiple readings of the same point into one sample.
        oatemp = (sum(oat_data) / len(oat_data))
        ratemp = (sum(rat_data) / len(rat_data))
        matemp = (sum(mat_data) / len(mat_data))
        damper_signal = (sum(damper_data) / len(damper_data))
        fan_speedcmd = None
        if fan_sp_data:
            fan_speedcmd = sum(fan_sp_data)/len(fan_sp_data)
        # Reject samples whose temperatures fall outside sane sensor limits.
        limit_check = False
        if oatemp < self.oat_low_threshold or oatemp > self.oat_high_threshold:
            Application.pre_requiste_messages.append(self.pre_msg8)
            limit_check = True
        if ratemp < self.rat_low_threshold or ratemp > self.rat_high_threshold:
            Application.pre_requiste_messages.append(self.pre_msg9)
            limit_check = True
        if matemp < self.mat_low_threshold or matemp > self.mat_high_threshold:
            Application.pre_requiste_messages.append(self.pre_msg10)
            limit_check = True
        if limit_check:
            dx_result = self.pre_message(dx_result, cur_time)
            return dx_result
        # The outdoor-air-fraction computation divides by (OAT - RAT);
        # skip samples where the two are nearly equal.
        if abs(oatemp - ratemp) < self.oaf_temp_thr:
            dx_result.log('OAT and RAT are too close, economizer diagnostic '
                          'will not use data corresponding to: {timestamp} '
                          .format(timestamp=str(cur_time)), logging.DEBUG)
            return dx_result
        # Derive a boolean/level cooling call depending on the device type.
        device_type_error = False
        if self.device_type == 'ahu':
            cooling_valve = sum(cooling_data) / len(cooling_data)
            if cooling_valve > self.cooling_enabled_threshold:
                cooling_call = True
            else:
                cooling_call = False
        elif self.device_type == 'rtu':
            cooling_call = int(max(cooling_data))
        else:
            device_type_error = True
            dx_result.log('device_type must be specified as "AHU" or "RTU" '
                          'Check Configuration input.', logging.INFO)
        if device_type_error:
            return dx_result
        # Economizer condition: differential dry-bulb ('ddb') compares OAT
        # to RAT; otherwise a high-limit setpoint is used.
        if self.economizer_type == 'ddb':
            econ_condition = (oatemp < (ratemp - self.temp_deadband))
        else:
            econ_condition = \
                oatemp < (self.econ_hl_temp - self.temp_deadband)
        # Run the temperature-sensor diagnostic first; economizer diagnostics
        # only run while no sensor problem has been detected.
        dx_result = self.econ1.econ_alg1(dx_result, oatemp, ratemp, matemp,
                                         damper_signal, cur_time)
        if (TempSensorDx.temp_sensor_problem is not None and
                TempSensorDx.temp_sensor_problem is False):
            dx_result = self.econ2.econ_alg2(dx_result, cooling_call, oatemp,
                                             ratemp, matemp, damper_signal,
                                             econ_condition, cur_time,
                                             fan_speedcmd)
            dx_result = self.econ3.econ_alg3(dx_result, oatemp, ratemp, matemp,
                                             damper_signal, econ_condition,
                                             cur_time, fan_speedcmd,
                                             cooling_call)
            dx_result = self.econ4.econ_alg4(dx_result, oatemp, ratemp, matemp,
                                             damper_signal, econ_condition,
                                             cur_time, fan_speedcmd,
                                             cooling_call)
            dx_result = self.econ5.econ_alg5(dx_result, oatemp, ratemp, matemp,
                                             damper_signal, econ_condition,
                                             cur_time, cooling_call)
        else:
            # Sensor fault or undetermined: discard data the economizer
            # diagnostics accumulated and reset the sensor flag.
            dx_result = self.econ2.clear_data(dx_result)
            dx_result = self.econ3.clear_data(dx_result)
            dx_result = self.econ4.clear_data(dx_result)
            dx_result = self.econ5.clear_data(dx_result)
            TempSensorDx.temp_sensor_problem = None
        return dx_result

    def pre_message(self, dx_result, cur_time):
        '''Handle reporting of diagnostic pre-requisite messages.

        Report to user when conditions are not favorable for a diagnostic.
        Messages are buffered and only emitted once the configured data
        window has elapsed; a message is logged if it occurred on more than
        a quarter of the buffered samples.
        '''
        Application.pre_msg_time.append(cur_time)
        pre_check = ((Application.pre_msg_time[-1] -
                      Application.pre_msg_time[0])
                     .total_seconds()/60)
        pre_check = pre_check if pre_check > 0.0 else 1.0
        if pre_check >= self.data_window:
            msg_lst = [self.pre_msg1, self.pre_msg2, self.pre_msg3,
                       self.pre_msg4, self.pre_msg5, self.pre_msg6,
                       self.pre_msg7, self.pre_msg8, self.pre_msg9,
                       self.pre_msg10]
            for item in msg_lst:
                if (Application.pre_requiste_messages.count(item) >
                        (0.25) * len(Application.pre_msg_time)):
                    dx_result.log(item, logging.DEBUG)
            Application.pre_requiste_messages = []
            Application.pre_msg_time = []
        return dx_result
class TempSensorDx(object):
    '''Air-side HVAC temperature sensor diagnostic for AHU/RTU systems.

    TempSensorDx uses metered data from a BAS or controller to
    diagnose if any of the temperature sensors for an AHU/RTU are accurate and
    reliable.
    '''
    def __init__(self, data_window, no_required_data,
                 temp_diff_thr, open_damper_time,
                 oat_mat_check, temp_damper_threshold):
        # Per-window sample accumulators; cleared by clear_data().
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.timestamp = []
        # OAT/MAT pairs captured while the damper has been (nearly) fully
        # open long enough that MAT should track OAT.
        self.open_oat = []
        self.open_mat = []
        # True while an open-damper steady-state period is being tracked.
        self.econ_check = False
        self.steady_state_st = None
        self.open_damper_time = int(open_damper_time)
        # Time the damper must remain open before OAT/MAT pairs are trusted.
        self.econ_time_check = td(minutes=self.open_damper_time - 1)
        # Class-level result flag read by Application.run:
        # None = undetermined, True = sensor problem, False = sensors OK.
        TempSensorDx.temp_sensor_problem = None
        # Maximum analysis-window length (minutes); longer windows are
        # reported as inconclusive (dx code 3.2).
        self.max_dx_time = 60
        '''Application thresholds (Configurable)'''
        self.data_window = float(data_window)
        self.no_required_data = no_required_data
        self.temp_diff_thr = float(temp_diff_thr)
        self.oat_mat_check = float(oat_mat_check)
        self.temp_damper_threshold = float(temp_damper_threshold)

    def econ_alg1(self, dx_result, oatemp,
                  ratemp, matemp, damper_signal, cur_time):
        '''Check app. pre-requisites and assemble data set for analysis.

        Accumulates one sample per call; once the data window and minimum
        sample count are satisfied, temperature_sensor_dx() is invoked.
        '''
        if (damper_signal) > self.temp_damper_threshold:
            # Damper is essentially fully open: start (or continue) a
            # steady-state period during which OAT and MAT should agree.
            if not self.econ_check:
                self.econ_check = True
                self.steady_state_st = cur_time
            if (cur_time - self.steady_state_st) >= self.econ_time_check:
                self.open_oat.append(oatemp)
                self.open_mat.append(matemp)
        else:
            self.econ_check = False
        self.oat_values.append(oatemp)
        self.mat_values.append(matemp)
        self.rat_values.append(ratemp)
        # A data gap of more than 5 minutes resets the open-damper tracking.
        if (self.timestamp and
                ((cur_time - self.timestamp[-1])
                 .total_seconds()/60) > 5.0):
            self.econ_check = False
        self.timestamp.append(cur_time)
        elapsed_time = ((self.timestamp[-1] - self.timestamp[0])
                        .total_seconds()/60)
        elapsed_time = elapsed_time if elapsed_time > 0 else 1.0
        if (elapsed_time >= self.data_window and
                len(self.timestamp) >= self.no_required_data):
            if elapsed_time > self.max_dx_time:
                # Window spanned too much wall-clock time to be trusted;
                # publish the inconclusive code and discard the data.
                dx_result.insert_table_row(Application.analysis,
                                           {ECON1 + DX: 3.2})
                dx_result = self.clear_data(dx_result)
                data = {
                    ECON1 + DATA + RAT: ratemp,
                    ECON1 + DATA + MAT: matemp,
                    ECON1 + DATA + OAT: oatemp,
                    ECON1 + DATA + OAD: damper_signal,
                    ECON1 + DATA + ST: 2
                }
                dx_result.insert_table_row(Application.analysis, data)
                return dx_result
            dx_result = self.temperature_sensor_dx(dx_result, cur_time)
            data = {
                ECON1 + DATA + RAT: ratemp,
                ECON1 + DATA + MAT: matemp,
                ECON1 + DATA + OAT: oatemp,
                ECON1 + DATA + OAD: damper_signal,
                ECON1 + DATA + ST: 1
            }
        else:
            data = {
                ECON1 + DATA + RAT: ratemp,
                ECON1 + DATA + MAT: matemp,
                ECON1 + DATA + OAT: oatemp,
                ECON1 + DATA + OAD: damper_signal,
                ECON1 + DATA + ST: 0
            }
        dx_result.insert_table_row(Application.analysis, data)
        return dx_result

    def temperature_sensor_dx(self, dx_result, cur_time):
        '''
        If the detected problems(s) are
        consistent then generate a fault message(s).

        Diagnostic codes written to the table: 0.0 no problem, 0.1 OAT/MAT
        disagree with damper fully open, 1.1 MAT below both OAT and RAT,
        2.1 MAT above both OAT and RAT, 3.2 inconclusive.
        '''
        # Average pairwise differences between the accumulated temperatures.
        oa_ma = [(x - y)
                 for x, y in zip(self.oat_values, self.mat_values)]
        ra_ma = [(x - y)
                 for x, y in zip(self.rat_values, self.mat_values)]
        ma_oa = [(y - x)
                 for x, y in zip(self.oat_values, self.mat_values)]
        ma_ra = [(y - x)
                 for x, y in zip(self.rat_values, self.mat_values)]
        avg_oa_ma = sum(oa_ma) / len(oa_ma)
        avg_ra_ma = sum(ra_ma) / len(ra_ma)
        avg_ma_oa = sum(ma_oa) / len(ma_oa)
        avg_ma_ra = sum(ma_ra) / len(ma_ra)
        # color_code = 'GREEN'
        Application.pre_requiste_messages = []
        Application.pre_msg_time = []
        dx_table = {}
        # Open-damper consistency check: with the damper fully open, MAT
        # should closely follow OAT.
        if len(self.open_oat) > self.no_required_data:
            mat_oat_diff_list = \
                [abs(x - y) for x, y in zip(self.open_oat, self.open_mat)]
            open_damper_check = sum(mat_oat_diff_list) / len(mat_oat_diff_list)
            if open_damper_check > self.oat_mat_check:
                TempSensorDx.temp_sensor_problem = True
                msg = ('The OAT and MAT sensor readings are not consistent '
                       'when the outdoor-air damper is fully open.')
                # color_code = 'RED'
                dx_msg = 0.1
                dx_table = {
                    # 'datetime': cur_time,
                    # 'diagnostic_name': ECON1,
                    ECON1 + DX: dx_msg,
                    ECON1 + EI: 0.0
                    # 'color_code': color_code
                }
                dx_result.log(msg, logging.INFO)
                dx_result.insert_table_row(Application.analysis, dx_table)
            self.open_oat = []
            self.open_mat = []
        if avg_oa_ma > self.temp_diff_thr and avg_ra_ma > self.temp_diff_thr:
            # MAT reads consistently below both OAT and RAT -- physically
            # implausible for mixed air.
            msg = ('Temperature sensor problem detected. Mixed-air '
                   'temperature is less than outdoor-air and return-air'
                   'temperatures.')
            # color_code = 'RED'
            dx_msg = 1.1
            dx_table = {
                # 'datetime': cur_time,
                # 'diagnostic_name': ECON1,
                ECON1 + DX: dx_msg,
                ECON1 + EI: 0.0
                # 'color_code': color_code
            }
            TempSensorDx.temp_sensor_problem = True
        elif((avg_ma_oa) > self.temp_diff_thr and
             (avg_ma_ra) > self.temp_diff_thr):
            # MAT reads consistently above both OAT and RAT.
            msg = ('Temperature sensor problem detected Mixed-air '
                   'temperature is greater than outdoor-air and return-air '
                   'temperatures.')
            TempSensorDx.temp_sensor_problem = True
            # color_code = 'RED'
            dx_msg = 2.1
            dx_table = {
                # 'datetime': cur_time,
                # 'diagnostic_name': ECON1,
                ECON1 + DX: dx_msg,
                ECON1 + EI: 0.0
                # 'color_code': color_code
            }
        elif (TempSensorDx.temp_sensor_problem is None or not
              TempSensorDx.temp_sensor_problem):
            msg = 'No problems were detected.'
            TempSensorDx.temp_sensor_problem = False
            # color_code = 'GREEN'
            dx_msg = 0.0
            dx_table = {
                # 'datetime': cur_time,
                # 'diagnostic_name': ECON1,
                ECON1 + DX: dx_msg,
                ECON1 + EI: 0.0
                # 'color_code': color_code
            }
        else:
            msg = 'Diagnostic was inconclusive.'
            TempSensorDx.temp_sensor_problem = False
            # color_code = 'GREY'
            dx_msg = 3.2
            dx_table = {
                # 'datetime': cur_time,
                # 'diagnostic_name': ECON1,
                ECON1 + DX: dx_msg,
                ECON1 + EI: 0.0
                # 'color_code': color_code
            }
        dx_result.insert_table_row(Application.analysis, dx_table)
        dx_result.log(msg, logging.INFO)
        dx_result = self.clear_data(dx_result)
        return dx_result

    def clear_data(self, dx_result):
        '''
        reinitialize class insufficient_oa data
        (resets the per-window sample accumulators).
        '''
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.timestamp = []
        return dx_result
class EconCorrectlyOn(object):
    '''Air-side HVAC economizer diagnostic for AHU/RTU systems.

    EconCorrectlyOn uses metered data from a BAS or controller to diagnose
    if an AHU/RTU is economizing when it should.
    '''
    def __init__(self, oaf_economizing_threshold, open_damper_threshold,
                 data_window, no_required_data, cfm, eer):
        '''Store configurable thresholds and reset the data accumulators.'''
        # Per-window sample accumulators; cleared by clear_data().
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.fan_speed_values = []
        self.oad_values = []
        self.timestamp = []
        # Timestamps seen while the unit was not cooling/economizing.
        self.output_no_run = []
        # Application thresholds (Configurable).
        self.open_damper_threshold = float(open_damper_threshold)
        self.oaf_economizing_threshold = float(oaf_economizing_threshold)
        self.data_window = float(data_window)
        self.no_required_data = no_required_data
        self.cfm = cfm
        self.eer = eer
        # Maximum analysis-window length (minutes); longer windows are
        # reported as inconclusive (dx code 13.2).
        self.max_dx_time = 60
        # Application result messages.
        self.alg_result_messages = [
            'Conditions are favorable for economizing but the '
            'damper is frequently below 100% open.',
            'No problems detected.',
            'Conditions are favorable for economizing and the '
            'damper is 100% open but the OAF indicates the unit '
            'is not brining in near 100% OA.'
        ]

    def econ_alg2(self, dx_result, cooling_call, oatemp, ratemp,
                  matemp, damper_signal, econ_condition, cur_time,
                  fan_sp):
        '''Check app. pre-requisites and assemble data set for analysis.

        Samples are accumulated only while the unit is cooling and
        conditions favor economizing; once the data window has elapsed with
        enough samples, not_economizing_when_needed() produces the result.
        '''
        if not cooling_call:
            dx_result.log('The unit is not cooling, data corresponding to '
                          '{timestamp} will not be used for {name} diagnostic.'
                          .format(timestamp=str(cur_time), name=ECON2),
                          logging.DEBUG)
            self.output_no_run.append(cur_time)
            if ((self.output_no_run[-1] - self.output_no_run[0]) >=
                    td(minutes=(self.data_window))):
                dx_result.log(
                    '{name}: the unit is not cooling or economizing, keep '
                    'collecting data.'.format(name=ECON2), logging.DEBUG)
                self.output_no_run = []
            return dx_result
        if not econ_condition:
            dx_result.log(
                '{name}: Conditions are not favorable for economizing, '
                'data corresponding to {ts} will not be used.'
                .format(ts=str(cur_time), name=ECON2), logging.DEBUG)
            self.output_no_run.append(cur_time)
            if ((self.output_no_run[-1] - self.output_no_run[0]) >=
                    td(minutes=(self.data_window))):
                dx_result.log(
                    '{name}: the unit is not cooling or economizing, keep '
                    'collecting data.'.format(name=ECON2), logging.DEBUG)
                self.output_no_run = []
            return dx_result
        self.oat_values.append(oatemp)
        self.mat_values.append(matemp)
        self.rat_values.append(ratemp)
        self.oad_values.append(damper_signal)
        # Normalize fan speed to [0, 1]; assume full speed when unreported.
        fan_sp = fan_sp/100.0 if fan_sp is not None else 1.0
        self.fan_speed_values.append(fan_sp)
        # BUG FIX: cur_time was previously appended to self.timestamp twice
        # per sample, which doubled the count compared against
        # no_required_data and duplicated window timestamps; append exactly
        # once, matching every other diagnostic class.
        self.timestamp.append(cur_time)
        elapsed_time = ((self.timestamp[-1] - self.timestamp[0])
                        .total_seconds()/60)
        elapsed_time = elapsed_time if elapsed_time > 0 else 1.0
        if (elapsed_time >= self.data_window and
                len(self.timestamp) >= self.no_required_data):
            if elapsed_time > self.max_dx_time:
                # Window spanned too much wall-clock time; flag inconclusive.
                dx_result.insert_table_row(Application.analysis,
                                           {ECON2 + DX: 13.2})
                dx_result = self.clear_data(dx_result)
                data = {
                    ECON2 + DATA + RAT: ratemp,
                    ECON2 + DATA + MAT: matemp,
                    ECON2 + DATA + OAT: oatemp,
                    ECON2 + DATA + OAD: damper_signal,
                    ECON2 + DATA + CC: cooling_call,
                    ECON2 + DATA + FS: fan_sp,
                    ECON2 + DATA + EC: econ_condition,
                    ECON2 + DATA + ST: 2
                }
                dx_result.insert_table_row(Application.analysis, data)
                return dx_result
            dx_result = \
                self.not_economizing_when_needed(dx_result, cur_time)
            data = {
                ECON2 + DATA + RAT: ratemp,
                ECON2 + DATA + MAT: matemp,
                ECON2 + DATA + OAT: oatemp,
                ECON2 + DATA + OAD: damper_signal,
                ECON2 + DATA + CC: cooling_call,
                ECON2 + DATA + FS: fan_sp,
                ECON2 + DATA + EC: econ_condition,
                ECON2 + DATA + ST: 1
            }
        else:
            data = {
                ECON2 + DATA + RAT: ratemp,
                ECON2 + DATA + MAT: matemp,
                ECON2 + DATA + OAT: oatemp,
                ECON2 + DATA + OAD: damper_signal,
                ECON2 + DATA + CC: cooling_call,
                ECON2 + DATA + FS: fan_sp,
                ECON2 + DATA + EC: econ_condition,
                ECON2 + DATA + ST: 0
            }
        dx_result.insert_table_row(Application.analysis, data)
        return dx_result

    def not_economizing_when_needed(self, dx_result, cur_time):
        '''If the detected problems(s) are consistent then generate a fault
        message(s).

        Diagnostic codes: 10.0 no problem, 11.1 damper below fully open,
        12.1 damper open but OAF too low.
        '''
        # OAF = (MAT - RAT) / (OAT - RAT); the caller is expected to have
        # screened out samples where OAT and RAT are nearly equal.
        oaf = [(m - r) / (o - r) for o, r, m in zip(self.oat_values,
                                                    self.rat_values,
                                                    self.mat_values)]
        avg_step = (
            (self.timestamp[-1] - self.timestamp[0]).total_seconds()/60
            if len(self.timestamp) > 1 else 1)
        avg_oaf = sum(oaf) / len(oaf) * 100.0
        avg_damper_signal = sum(self.oad_values)/len(self.oad_values)
        energy_impact = 0.0
        if avg_damper_signal < self.open_damper_threshold:
            msg = (self.alg_result_messages[0])
            color_code = 'RED'
            dx_msg = 11.1
        else:
            if (100.0 - avg_oaf) <= self.oaf_economizing_threshold:
                msg = (self.alg_result_messages[1])
                color_code = 'GREEN'
                energy_impact = 0.0
                dx_msg = 10.0
            else:
                msg = (self.alg_result_messages[2])
                color_code = 'RED'
                dx_msg = 12.1
        # Estimate avoidable cooling energy (kWh/h) for faulted samples.
        energy_calc = \
            [1.08 * spd * self.cfm * (ma - oa) / (1000.0 * self.eer)
             for ma, oa, spd in zip(self.mat_values, self.oat_values,
                                    self.fan_speed_values)
             if (ma - oa) > 0 and color_code == 'RED']
        if energy_calc:
            dx_time = (len(energy_calc) - 1) * avg_step if len(energy_calc) > 1 else 1.0
            energy_impact = \
                (sum(energy_calc) * 60.0) / (len(energy_calc) * dx_time)
            energy_impact = round(energy_impact, 2)
        dx_table = {
            ECON2 + DX: dx_msg,
            ECON2 + EI: energy_impact
        }
        dx_result.insert_table_row(Application.analysis, dx_table)
        dx_result.log(msg, logging.INFO)
        dx_result = self.clear_data(dx_result)
        return dx_result

    def clear_data(self, dx_result):
        '''
        reinitialize class insufficient_oa data.
        (Resets the per-window sample accumulators.)
        '''
        self.oad_values = []
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.fan_speed_values = []
        self.timestamp = []
        return dx_result
class EconCorrectlyOff(object):
    '''Air-side HVAC economizer diagnostic for AHU/RTU systems.

    EconCorrectlyOff uses metered data from a BAS or controller to diagnose
    if an AHU/RTU is economizing when it should not.
    '''
    def __init__(self, data_window, no_required_data, min_damper_sp,
                 excess_damper_threshold, cooling_enabled_threshold,
                 desired_oaf, cfm, eer):
        '''Store configurable thresholds and reset the data accumulators.'''
        # Per-window sample accumulators; cleared by clear_data().
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.oad_values = []
        self.cool_call_values = []
        self.cfm = cfm
        self.eer = eer
        self.fan_speed_values = []
        self.timestamp = []
        # Application result messages
        self.alg_result_messages = \
            ['The outdoor-air damper should be at the minimum position but is '
             'significantly above that value.',
             'No problems detected.',
             'The diagnostic led to inconclusive results, could not '
             'verify the status of the economizer.']
        # Maximum analysis-window length (minutes); longer windows are
        # reported as inconclusive (dx code 23.2).
        self.max_dx_time = 60
        self.data_window = float(data_window)
        self.no_required_data = no_required_data
        self.min_damper_sp = float(min_damper_sp)
        self.excess_damper_threshold = float(excess_damper_threshold)
        self.cooling_enabled_threshold = float(cooling_enabled_threshold)
        self.desired_oaf = float(desired_oaf)

    def econ_alg3(self, dx_result, oatemp, ratemp, matemp,
                  damper_signal, econ_condition, cur_time,
                  fan_sp, cooling_call):
        '''Check app. pre-requisites and assemble data set for analysis.

        Samples are accumulated only while the unit is *not* economizing;
        once the data window has elapsed with enough samples,
        economizing_when_not_needed() produces the result.
        '''
        if econ_condition:
            # The unit may legitimately be economizing; skip this sample.
            # BUG FIX: the original built this message with
            # alg_result_messages[2].join([...]) on a one-element list,
            # which used the message as a str.join *separator* and silently
            # dropped it from the log; format the message explicitly.
            dx_result.log('{prefix} Data corresponding to {ts} will not '
                          'be used for this diagnostic.'
                          .format(prefix=self.alg_result_messages[2],
                                  ts=str(cur_time)),
                          logging.DEBUG)
            return dx_result
        else:
            self.oad_values.append(damper_signal)
            self.oat_values.append(oatemp)
            self.mat_values.append(matemp)
            self.rat_values.append(ratemp)
            self.timestamp.append(cur_time)
            # Normalize fan speed to [0, 1]; assume full speed if missing.
            fan_sp = fan_sp/100.0 if fan_sp is not None else 1.0
            self.fan_speed_values.append(fan_sp)
            elapsed_time = ((self.timestamp[-1] - self.timestamp[0])
                            .total_seconds()/60)
            elapsed_time = elapsed_time if elapsed_time > 0 else 1.0
            if (elapsed_time >= self.data_window and
                    len(self.timestamp) >= self.no_required_data):
                if elapsed_time > self.max_dx_time:
                    # Window spanned too much time; flag inconclusive.
                    dx_result.insert_table_row(Application.analysis,
                                               {ECON3 + DX: 23.2})
                    dx_result = self.clear_data(dx_result)
                    data = {
                        ECON3 + DATA + RAT: ratemp,
                        ECON3 + DATA + MAT: matemp,
                        ECON3 + DATA + OAT: oatemp,
                        ECON3 + DATA + OAD: damper_signal,
                        ECON3 + DATA + CC: cooling_call,
                        ECON3 + DATA + FS: fan_sp,
                        ECON3 + DATA + EC: econ_condition,
                        ECON3 + DATA + ST: 2
                    }
                    dx_result.insert_table_row(Application.analysis, data)
                    return dx_result
                dx_result = self.economizing_when_not_needed(dx_result, cur_time)
                data = {
                    ECON3 + DATA + RAT: ratemp,
                    ECON3 + DATA + MAT: matemp,
                    ECON3 + DATA + OAT: oatemp,
                    ECON3 + DATA + OAD: damper_signal,
                    ECON3 + DATA + CC: cooling_call,
                    ECON3 + DATA + FS: fan_sp,
                    ECON3 + DATA + EC: econ_condition,
                    ECON3 + DATA + ST: 1
                }
            else:
                data = {
                    ECON3 + DATA + RAT: ratemp,
                    ECON3 + DATA + MAT: matemp,
                    ECON3 + DATA + OAT: oatemp,
                    ECON3 + DATA + OAD: damper_signal,
                    ECON3 + DATA + CC: cooling_call,
                    ECON3 + DATA + FS: fan_sp,
                    ECON3 + DATA + EC: econ_condition,
                    ECON3 + DATA + ST: 0
                }
            dx_result.insert_table_row(Application.analysis, data)
            return dx_result

    def economizing_when_not_needed(self, dx_result, cur_time):
        '''If the detected problems(s)
        are consistent then generate a
        fault message(s).

        Diagnostic codes: 20.0 no problem, 21.1 damper above minimum when
        it should not be.
        '''
        avg_step = ((self.timestamp[-1] - self.timestamp[0]).total_seconds()/60
                    if len(self.timestamp) > 1 else 1)
        desired_oaf = self.desired_oaf / 100.0
        energy_impact = 0.0
        # Estimated avoidable cooling energy (kWh/h) for each sample where
        # the mixed-air temperature exceeds the desired OAT/RAT mix.
        energy_calc = [
            (1.08 * spd * self.cfm * (ma - (oa * desired_oaf +
                                            (ra * (1.0 - desired_oaf))))) /
            (1000.0 * self.eer)
            for ma, oa, ra, spd in zip(self.mat_values,
                                       self.oat_values,
                                       self.rat_values,
                                       self.fan_speed_values)
            if (ma - (oa * desired_oaf + (ra * (1.0 - desired_oaf)))) > 0]
        avg_damper = sum(self.oad_values) / len(self.oad_values)
        if (avg_damper - self.min_damper_sp) > self.excess_damper_threshold:
            msg = self.alg_result_messages[0]
            color_code = 'RED'
            dx_msg = 21.1
        else:
            msg = 'No problems detected.'
            color_code = 'GREEN'
            energy_impact = 0.0
            dx_msg = 20.0
        if energy_calc and color_code == 'RED':
            dx_time = (len(energy_calc) - 1) * \
                avg_step if len(energy_calc) > 1 else 1.0
            energy_impact = (
                (sum(energy_calc) * 60.0) / (len(energy_calc) * dx_time))
            energy_impact = round(energy_impact, 2)
        dx_table = {
            ECON3 + DX: dx_msg,
            ECON3 + EI: energy_impact
        }
        dx_result.insert_table_row(Application.analysis, dx_table)
        dx_result.log(msg, logging.INFO)
        dx_result = self.clear_data(dx_result)
        return dx_result

    def clear_data(self, dx_result):
        '''
        reinitialize class insufficient_oa data
        (resets the per-window sample accumulators).
        '''
        self.oad_values = []
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.fan_speed_values = []
        self.timestamp = []
        return dx_result
class ExcessOA(object):
''' Air-side HVAC ventilation diagnostic.
ExcessOA uses metered data from a controller or
BAS to diagnose when an AHU/RTU is providing excess outdoor air.
'''
def __init__(self, data_window, no_required_data, excess_oaf_threshold,
min_damper_sp, excess_damper_threshold, desired_oaf,
cfm, eer):
self.oat_values = []
self.rat_values = []
self.mat_values = []
self.oad_values = []
self.cool_call_values = []
self.timestamp = []
self.fan_speed_values = []
# Application thresholds (Configurable)
self.cfm = cfm
self.eer = eer
self.max_dx_time = 60
self.data_window = float(data_window)
self.no_required_data = no_required_data
self.excess_oaf_threshold = float(excess_oaf_threshold)
self.min_damper_sp = float(min_damper_sp)
self.desired_oaf = float(desired_oaf)
self.excess_damper_threshold = float(excess_damper_threshold)
def econ_alg4(self, dx_result, oatemp, ratemp, matemp,
damper_signal, econ_condition, cur_time,
fan_sp, cooling_call):
'''Check app. pre-quisites and assemble data set for analysis.'''
if econ_condition:
dx_result.log('The unit may be economizing, '
'data corresponding to {timestamp} '
'will not be used for this diagnostic.'
.format(timestamp=str(cur_time)), logging.DEBUG)
return dx_result
self.oad_values.append(damper_signal)
self.oat_values.append(oatemp)
self.rat_values.append(ratemp)
self.mat_values.append(matemp)
self.timestamp.append(cur_time)
fan_sp = fan_sp/100.0 if fan_sp is not None else 1.0
self.fan_speed_values.append(fan_sp)
elapsed_time = \
(self.timestamp[-1] - self.timestamp[0]).total_seconds()/60
elapsed_time = elapsed_time if elapsed_time > 0 else 1.0
if (elapsed_time >= self.data_window and
len(self.timestamp) >= self.no_required_data):
if elapsed_time > self.max_dx_time:
dx_result.insert_table_row(Application.analysis,
{ECON4 + DX: 35.2})
dx_result = self.clear_data(dx_result)
data = {
ECON4 + DATA + RAT: ratemp,
ECON4 + DATA + MAT: matemp,
ECON4 + DATA + OAT: oatemp,
ECON4 + DATA + OAD: damper_signal,
ECON4 + DATA + CC: cooling_call,
ECON4 + DATA + FS: fan_sp,
ECON4 + DATA + EC: econ_condition,
ECON4 + DATA + ST: 2
}
dx_result.insert_table_row(Application.analysis, data)
return dx_result
dx_result = self.excess_oa(dx_result, cur_time)
data = {
ECON4 + DATA + RAT: ratemp,
ECON4 + DATA + MAT: matemp,
ECON4 + DATA + OAT: oatemp,
ECON4 + DATA + OAD: damper_signal,
ECON4 + DATA + CC: cooling_call,
ECON4 + DATA + FS: fan_sp,
ECON4 + DATA + EC: econ_condition,
ECON4 + DATA + ST: 1
}
else:
data = {
ECON4 + DATA + RAT: ratemp,
ECON4 + DATA + MAT: matemp,
ECON4 + DATA + OAT: oatemp,
ECON4 + DATA + OAD: damper_signal,
ECON4 + DATA + CC: cooling_call,
ECON4 + DATA + FS: fan_sp,
ECON4 + DATA + EC: econ_condition,
ECON4 + DATA + ST: 0
}
dx_result.insert_table_row(Application.analysis, data)
return dx_result
def excess_oa(self, dx_result, cur_time):
'''If the detected problems(s) are
consistent generate a fault message(s).
'''
avg_step = (
(self.timestamp[-1] - self.timestamp[0]).total_seconds()/60
if len(self.timestamp) > 1 else 1)
oaf = [(m - r) / (o - r) for o, r, m in zip(self.oat_values,
self.rat_values,
self.mat_values)]
avg_oaf = sum(oaf) / len(oaf) * 100
avg_damper = sum(self.oad_values) / len(self.oad_values)
desired_oaf = self.desired_oaf / 100.0
energy_calc = [
(1.08 * spd * self.cfm * (ma - (oa * desired_oaf +
(ra * (1.0 - desired_oaf))))) /
(1000.0 * self.eer)
for ma, oa, ra, spd in zip(self.mat_values,
self.oat_values,
self.rat_values,
self.fan_speed_values)
if (ma - (oa * desired_oaf + (ra * (1.0 - desired_oaf)))) > 0]
# color_code = 'GREY'
energy_impact = 0.0
msg = ''
dx_msg = 30.0
if avg_oaf < 0 or avg_oaf > 125.0:
msg = ('Inconclusive result, the OAF calculation led to an '
'unexpected value: {oaf}'.format(oaf=avg_oaf))
# color_code = 'GREY'
dx_msg = 31.2
dx_result.log(msg, logging.INFO)
dx_table = {
# 'datetime': cur_time,
# 'diagnostic_name': ECON4,
ECON4 + DX: dx_msg,
ECON4 + EI: 0.0
# 'color_code': color_code
}
dx_result.insert_table_row(Application.analysis, dx_table)
dx_result = self.clear_data(dx_result)
return dx_result
if (avg_damper - self.min_damper_sp) > self.excess_damper_threshold:
msg = ('The OAD should be at the minimum position for ventilation '
'but is significantly higher than this value.')
# color_code = 'RED'
dx_msg = 32.1
if energy_calc:
dx_time = \
(len(energy_calc) - 1) * avg_step if len(energy_calc) > 1 else 1.0
energy_impact = (
sum(energy_calc) * 60.0) / (len(energy_calc) * dx_time)
if avg_oaf - self.desired_oaf > self.excess_oaf_threshold:
if dx_msg > 30.0:
msg += ('Excess outdoor-air is being provided, this could '
'increase heating and cooling energy consumption.')
dx_msg = 34.1
else:
msg = ('Excess outdoor air is being provided, this could '
'increase heating and cooling energy consumption.')
dx_msg = 33.1
# color_code = 'RED'
if energy_calc:
dx_time = \
(len(energy_calc) - 1) * avg_step if len(energy_calc) > 1 else 1.0
energy_impact = (
sum(energy_calc) * 60.0) / (len(energy_calc) * dx_time)
# energy_impact = '%s' % float('%.2g' % energy_impact)
# energy_impact = str(energy_impact)
# energy_impact = ''.join([energy_impact, ' kWh/h'])
elif not dx_msg:
msg = ('The calculated outdoor-air fraction is within '
'configured limits.')
# color_code = 'GREEN'
energy_impact = 0.0
dx_msg = 30.0
energy_impact = round(energy_impact, 2)
dx_table = {
# 'datetime': cur_time,
# 'diagnostic_name': ECON4,
ECON4 + DX: dx_msg,
ECON4 + EI: energy_impact
# 'color_code': color_code
}
dx_result.insert_table_row(Application.analysis, dx_table)
dx_result.log(msg, logging.INFO)
dx_result = self.clear_data(dx_result)
return dx_result
def clear_data(self, dx_result):
    '''Reset every accumulated data series for the next analysis window.

    Returns *dx_result* unchanged so the call can be chained.
    '''
    for series in ('oad_values', 'oat_values', 'rat_values',
                   'mat_values', 'fan_speed_values', 'timestamp'):
        setattr(self, series, [])
    return dx_result
class InsufficientOA(object):
    '''Air-side HVAC ventilation diagnostic.

    insufficient_oa_intake uses metered data from a controller or
    BAS to diagnose when an AHU/RTU is providing inadequate ventilation.
    '''

    def __init__(self, data_window, no_required_data,
                 ventilation_oaf_threshold, min_damper_sp,
                 insufficient_damper_threshold, desired_oaf):
        '''Store the configurable thresholds and reset the accumulators.'''
        # Sensor/time-series accumulators for one analysis window.
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.oad_values = []
        self.cool_call_values = []
        self.timestamp = []
        # Application thresholds (configurable).
        self.data_window = float(data_window)
        self.no_required_data = no_required_data
        # Maximum analysis-window length before the data is discarded
        # (same unit as elapsed_time in econ_alg5, i.e. minutes).
        self.max_dx_time = 60
        self.ventilation_oaf_threshold = float(ventilation_oaf_threshold)
        self.insufficient_damper_threshold = float(
            insufficient_damper_threshold)
        self.min_damper_sp = float(min_damper_sp)
        self.desired_oaf = float(desired_oaf)

    def econ_alg5(self, dx_result, oatemp, ratemp, matemp, damper_signal,
                  econ_condition, cur_time, cooling_call):
        '''Check app. pre-requisites and assemble the data set for analysis.

        Accumulates one sample per call; once the configured data window
        has elapsed with enough samples, runs the diagnostic and resets.
        Returns the (possibly updated) *dx_result*.
        '''
        self.oat_values.append(oatemp)
        self.rat_values.append(ratemp)
        self.mat_values.append(matemp)
        self.oad_values.append(damper_signal)
        self.timestamp.append(cur_time)
        elapsed_time = ((self.timestamp[-1] - self.timestamp[0])
                        .total_seconds() / 60)
        # Guard against a zero/negative span (single sample, clock skew).
        elapsed_time = elapsed_time if elapsed_time > 0 else 1.0
        if (elapsed_time >= self.data_window and
                len(self.timestamp) >= self.no_required_data):
            if elapsed_time > self.max_dx_time:
                # Window ran too long: report code 44.2 and discard data.
                dx_result.insert_table_row(Application.analysis,
                                           {ECON5 + DX: 44.2})
                dx_result = self.clear_data(dx_result)
                data = self._data_row(oatemp, ratemp, matemp, damper_signal,
                                      cooling_call, econ_condition, 2)
                dx_result.insert_table_row(Application.analysis, data)
                return dx_result
            dx_result = self.insufficient_oa(dx_result, cur_time)
            data = self._data_row(oatemp, ratemp, matemp, damper_signal,
                                  cooling_call, econ_condition, 1)
        else:
            # Still collecting data; just echo the inputs with status 0.
            data = self._data_row(oatemp, ratemp, matemp, damper_signal,
                                  cooling_call, econ_condition, 0)
        dx_result.insert_table_row(Application.analysis, data)
        return dx_result

    def _data_row(self, oatemp, ratemp, matemp, damper_signal,
                  cooling_call, econ_condition, status):
        '''Build the table row echoing the current inputs plus a status code.'''
        return {
            ECON5 + DATA + RAT: ratemp,
            ECON5 + DATA + MAT: matemp,
            ECON5 + DATA + OAT: oatemp,
            ECON5 + DATA + OAD: damper_signal,
            ECON5 + DATA + CC: cooling_call,
            ECON5 + DATA + EC: econ_condition,
            ECON5 + DATA + ST: status
        }

    def _report_inconclusive(self, dx_result, msg):
        '''Log *msg*, emit the inconclusive (41.2) row and reset the data.'''
        # color_code = 'GREY'
        dx_result.log(msg, logging.INFO)
        dx_result.insert_table_row(Application.analysis,
                                   {ECON5 + DX: 41.2, ECON5 + EI: 0.0})
        return self.clear_data(dx_result)

    def insufficient_oa(self, dx_result, cur_time):
        '''If the detected problem(s) are consistent,
        generate a fault message(s).
        '''
        # Outdoor-air fraction per sample.  Samples where OAT == RAT are
        # skipped: the fraction is undefined there and previously caused a
        # ZeroDivisionError.
        oaf = [(m - r) / (o - r) for o, r, m in zip(self.oat_values,
                                                    self.rat_values,
                                                    self.mat_values)
               if o != r]
        if not oaf:
            msg = ('Inconclusive result, the OAF could not be calculated '
                   'because the outdoor-air and return-air temperatures '
                   'were equal for all samples.')
            return self._report_inconclusive(dx_result, msg)
        avg_oaf = sum(oaf) / len(oaf) * 100.0
        if avg_oaf < 0 or avg_oaf > 125.0:
            msg = ('Inconclusive result, the OAF calculation led to an '
                   'unexpected value: {oaf}'.format(oaf=avg_oaf))
            return self._report_inconclusive(dx_result, msg)
        # NOTE: a damper-position diagnostic (code 42.1, comparing the
        # average damper signal against min_damper_sp with
        # insufficient_damper_threshold) is deliberately disabled: the
        # current deployment has separate minimum outdoor-air and
        # economizer dampers, so ventilation does not depend on the
        # economizer damper.
        if (self.desired_oaf - avg_oaf) > self.ventilation_oaf_threshold:
            msg = 'Insufficient outdoor-air is being provided for ventilation.'
            # color_code = 'RED'
            dx_msg = 43.1
        else:
            msg = ('The calculated outdoor-air fraction was within '
                   'acceptable limits.')
            # color_code = 'GREEN'
            dx_msg = 40.0
        dx_table = {
            ECON5 + DX: dx_msg,
            ECON5 + EI: 0.0
        }
        dx_result.insert_table_row(Application.analysis, dx_table)
        dx_result.log(msg, logging.INFO)
        # Reset the application-level pre-requisite message buffers.
        Application.pre_msg_time = []
        Application.pre_requiste_messages = []
        return self.clear_data(dx_result)

    def clear_data(self, dx_result):
        '''Reinitialize the accumulated insufficient_oa data.'''
        self.oad_values = []
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        # Consistency fix: cool_call_values is created in __init__ but was
        # never reset here; clear it with the other accumulators.
        self.cool_call_values = []
        self.timestamp = []
        return dx_result
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for ml2 rpc
"""
import collections
import mock
from sqlalchemy.orm import exc
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.openstack.common import context
from neutron.plugins.ml2.drivers import type_tunnel
from neutron.plugins.ml2 import rpc as plugin_rpc
from neutron.tests import base
class RpcCallbacksTestCase(base.BaseTestCase):
    """Tests for the ml2 plugin-side RPC callback handlers.

    NeutronManager is patched so the callbacks resolve mocked core and
    L3 service plugins.
    """

    def setUp(self):
        super(RpcCallbacksTestCase, self).setUp()
        self.callbacks = plugin_rpc.RpcCallbacks(mock.Mock(), mock.Mock())
        # Patch is started but not explicitly stopped here.
        # NOTE(review): assumes the base test class stops started mock
        # patches during cleanup — confirm.
        self.manager = mock.patch.object(
            plugin_rpc.manager, 'NeutronManager').start()
        self.l3plugin = mock.Mock()
        self.manager.get_service_plugins.return_value = {
            'L3_ROUTER_NAT': self.l3plugin
        }
        self.plugin = self.manager.get_plugin()

    def _test_update_device_up(self, extensions, kwargs):
        """Invoke update_device_up with the given L3 extension aliases."""
        with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin'
                        '._device_to_port_id'):
            # Advertise the aliases via a PropertyMock on the plugin type.
            type(self.l3plugin).supported_extension_aliases = (
                mock.PropertyMock(return_value=extensions))
            self.callbacks.update_device_up(mock.ANY, **kwargs)

    def test_update_device_up_without_dvr(self):
        # Without the 'dvr' alias the ARP table must not be touched.
        kwargs = {
            'agent_id': 'foo_agent',
            'device': 'foo_device'
        }
        self._test_update_device_up(['router'], kwargs)
        self.assertFalse(self.l3plugin.dvr_vmarp_table_update.call_count)

    def test_update_device_up_with_dvr(self):
        kwargs = {
            'agent_id': 'foo_agent',
            'device': 'foo_device'
        }
        self._test_update_device_up(['router', 'dvr'], kwargs)
        self.l3plugin.dvr_vmarp_table_update.assert_called_once_with(
            mock.ANY, mock.ANY, 'add')

    def test_update_device_up_with_dvr_when_port_not_found(self):
        # A missing port must be swallowed, not propagated to the caller.
        kwargs = {
            'agent_id': 'foo_agent',
            'device': 'foo_device'
        }
        self.l3plugin.dvr_vmarp_table_update.side_effect = (
            exceptions.PortNotFound(port_id='foo_port_id'))
        self._test_update_device_up(['router', 'dvr'], kwargs)
        self.assertTrue(self.l3plugin.dvr_vmarp_table_update.call_count)

    def test_get_device_details_without_port_context(self):
        # No bound port context: only the device id is echoed back.
        self.plugin.get_bound_port_context.return_value = None
        self.assertEqual(
            {'device': 'fake_device'},
            self.callbacks.get_device_details('fake_context',
                                              device='fake_device'))

    def test_get_device_details_port_context_without_bounded_segment(self):
        # A context without a bound segment is treated like no context.
        self.plugin.get_bound_port_context().bound_segment = None
        self.assertEqual(
            {'device': 'fake_device'},
            self.callbacks.get_device_details('fake_context',
                                              device='fake_device'))

    def test_get_device_details_port_status_equal_new_status(self):
        port = collections.defaultdict(lambda: 'fake')
        self.plugin.get_bound_port_context().current = port
        for admin_state_up in (True, False):
            # Target status depends on the port's admin state.
            new_status = (constants.PORT_STATUS_BUILD if admin_state_up
                          else constants.PORT_STATUS_DOWN)
            for status in (constants.PORT_STATUS_ACTIVE,
                           constants.PORT_STATUS_BUILD,
                           constants.PORT_STATUS_DOWN,
                           constants.PORT_STATUS_ERROR):
                port['admin_state_up'] = admin_state_up
                port['status'] = status
                self.plugin.update_port_status.reset_mock()
                self.callbacks.get_device_details('fake_context',
                                                  host='fake_host')
                # update_port_status is skipped iff the status already
                # matches the expected new status.
                self.assertEqual(status == new_status,
                                 not self.plugin.update_port_status.called)

    def test_get_devices_details_list(self):
        # The list call fans out to get_device_details once per device.
        devices = [1, 2, 3, 4, 5]
        kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'}
        with mock.patch.object(self.callbacks, 'get_device_details',
                               side_effect=devices) as f:
            res = self.callbacks.get_devices_details_list('fake_context',
                                                          devices=devices,
                                                          **kwargs)
            self.assertEqual(devices, res)
            self.assertEqual(len(devices), f.call_count)
            calls = [mock.call('fake_context', device=i, **kwargs)
                     for i in devices]
            f.assert_has_calls(calls)

    def test_get_devices_details_list_with_empty_devices(self):
        with mock.patch.object(self.callbacks, 'get_device_details') as f:
            res = self.callbacks.get_devices_details_list('fake_context')
            self.assertFalse(f.called)
            self.assertEqual([], res)

    def _test_update_device_not_bound_to_host(self, func):
        """Run *func* for a device whose port is not bound to the host."""
        self.plugin.port_bound_to_host.return_value = False
        self.plugin._device_to_port_id.return_value = 'fake_port_id'
        res = func('fake_context', device='fake_device', host='fake_host')
        self.plugin.port_bound_to_host.assert_called_once_with('fake_context',
                                                               'fake_port_id',
                                                               'fake_host')
        return res

    def test_update_device_up_with_device_not_bound_to_host(self):
        self.assertIsNone(self._test_update_device_not_bound_to_host(
            self.callbacks.update_device_up))

    def test_update_device_down_with_device_not_bound_to_host(self):
        # Unbound ports are reported as still existing.
        self.assertEqual(
            {'device': 'fake_device', 'exists': True},
            self._test_update_device_not_bound_to_host(
                self.callbacks.update_device_down))

    def test_update_device_down_call_update_port_status(self):
        self.plugin.update_port_status.return_value = False
        self.plugin._device_to_port_id.return_value = 'fake_port_id'
        self.assertEqual(
            {'device': 'fake_device', 'exists': False},
            self.callbacks.update_device_down('fake_context',
                                              device='fake_device',
                                              host='fake_host'))
        self.plugin.update_port_status.assert_called_once_with(
            'fake_context', 'fake_port_id', constants.PORT_STATUS_DOWN,
            'fake_host')

    def test_update_device_down_call_update_port_status_failed(self):
        # A StaleDataError (concurrent delete) reports the port as gone.
        self.plugin.update_port_status.side_effect = exc.StaleDataError
        self.assertEqual({'device': 'fake_device', 'exists': False},
                         self.callbacks.update_device_down(
                             'fake_context', device='fake_device'))
class RpcApiTestCase(base.BaseTestCase):
    """Verify message construction for the ml2 agent/plugin RPC APIs."""

    def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs):
        """Invoke *method* on *rpcapi* and check the proxied RPC call."""
        ctxt = context.RequestContext('fake_user', 'fake_project')
        version_wanted = kwargs.pop('version', None)
        # Only 'call'-style invocations return a value.
        retval_wanted = 'foo' if method == 'call' else None
        msg_wanted = rpcapi.make_msg(method, **kwargs)
        if rpc_method == 'cast' and method == 'run_instance':
            kwargs['call'] = False
        with mock.patch.object(n_rpc.RpcProxy, rpc_method) as proxied:
            proxied.return_value = retval_wanted
            actual = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(actual, retval_wanted)
        extra = {}
        if topic:
            extra['topic'] = topic
        if version_wanted:
            extra['version'] = version_wanted
        proxied.assert_has_calls([mock.call(ctxt, msg_wanted, **extra)])

    def _agent_topic(self, *pieces):
        """Build a fanout topic rooted at the agent topic."""
        return topics.get_topic_name(topics.AGENT, *pieces)

    def test_delete_network(self):
        notifier = plugin_rpc.AgentNotifierApi(topics.AGENT)
        self._test_rpc_api(
            notifier,
            self._agent_topic(topics.NETWORK, topics.DELETE),
            'network_delete', rpc_method='fanout_cast',
            network_id='fake_request_spec')

    def test_port_update(self):
        notifier = plugin_rpc.AgentNotifierApi(topics.AGENT)
        self._test_rpc_api(
            notifier,
            self._agent_topic(topics.PORT, topics.UPDATE),
            'port_update', rpc_method='fanout_cast',
            port='fake_port',
            network_type='fake_network_type',
            segmentation_id='fake_segmentation_id',
            physical_network='fake_physical_network')

    def test_tunnel_update(self):
        notifier = plugin_rpc.AgentNotifierApi(topics.AGENT)
        self._test_rpc_api(
            notifier,
            self._agent_topic(type_tunnel.TUNNEL, topics.UPDATE),
            'tunnel_update', rpc_method='fanout_cast',
            tunnel_ip='fake_ip', tunnel_type='gre')

    def test_device_details(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(plugin_api, None,
                           'get_device_details', rpc_method='call',
                           device='fake_device',
                           agent_id='fake_agent_id',
                           host='fake_host')

    def test_devices_details_list(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(plugin_api, None,
                           'get_devices_details_list', rpc_method='call',
                           devices=['fake_device1', 'fake_device2'],
                           agent_id='fake_agent_id', host='fake_host',
                           version='1.3')

    def test_update_device_down(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(plugin_api, None,
                           'update_device_down', rpc_method='call',
                           device='fake_device',
                           agent_id='fake_agent_id',
                           host='fake_host')

    def test_tunnel_sync(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(plugin_api, None,
                           'tunnel_sync', rpc_method='call',
                           tunnel_ip='fake_tunnel_ip',
                           tunnel_type=None)

    def test_update_device_up(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_rpc_api(plugin_api, None,
                           'update_device_up', rpc_method='call',
                           device='fake_device',
                           agent_id='fake_agent_id',
                           host='fake_host')
| |
import os
from tempfile import mkdtemp, mkstemp
from shutil import rmtree
from os import makedirs
from os.path import dirname, join
from unittest import TestCase
from carbon.conf import get_default_parser, parse_options, read_config
from carbon.exceptions import CarbonConfigException
class FakeParser(object):
    """Test double for an option parser; records print_usage() calls."""

    def __init__(self):
        # Names of the parser methods that were invoked.
        self.called = []

    def parse_args(self, args):
        """Return a dummy options object together with the untouched args."""
        return (object(), args)

    def print_usage(self):
        """Note that the usage help would have been printed."""
        self.called.append("print_usage")
class FakeOptions(object):
    """Attribute/dict hybrid that mimics parsed command-line options."""

    def __init__(self, **kwargs):
        # Store every keyword directly as an instance attribute.
        vars(self).update(kwargs)

    def __getitem__(self, name):
        # Dict-style read of an instance attribute.
        return vars(self)[name]

    def __setitem__(self, name, value):
        # Dict-style write of an instance attribute.
        vars(self)[name] = value
class DefaultParserTest(TestCase):

    def test_default_parser(self):
        """Check default parser settings."""
        parser = get_default_parser()
        # Every supported option with its expected default value.
        expected = [
            ("debug", None),
            ("profile", None),
            ("pidfile", None),
            ("config", None),
            ("logdir", None),
            ("instance", "a"),
        ]
        for name, default in expected:
            self.assertTrue(parser.has_option("--" + name))
            self.assertEqual(default, parser.defaults[name])
class ParseOptionsTest(TestCase):

    def _assert_usage_and_exit(self, args):
        """Expect parse_options(*args*) to print usage and raise SystemExit."""
        parser = FakeParser()
        self.assertRaises(SystemExit, parse_options, parser, args)
        self.assertEqual(["print_usage"], parser.called)

    def test_no_args_prints_usage_and_exit(self):
        """
        If no arguments are provided, the usage help will be printed and a
        SystemExit exception will be raised.
        """
        self._assert_usage_and_exit(())

    def test_no_valid_args_prints_usage_and_exit(self):
        """
        If an argument which isn't a valid command was provided, 'print_usage'
        will be called and a SystemExit exception will be raised.
        """
        self._assert_usage_and_exit(("bazinga!",))

    def test_valid_args(self):
        """
        If a valid argument is provided, it will be returned along with
        options.
        """
        parser = FakeParser()
        options, args = parser.parse_args(("start",))
        self.assertEqual(("start",), args)
class ReadConfigTest(TestCase):
    """Tests for read_config(): option precedence and derived settings."""

    def makeFile(self, content=None, basename=None, dirname=None):
        """
        Create a temporary file with content

        Deletes the file after tests
        """
        # NOTE: the 'dirname' parameter shadows os.path.dirname inside this
        # method; here it is always the destination directory.
        if basename is not None:
            path = join(dirname, basename)
        else:
            fd, path = mkstemp(dir=dirname)
            os.close(fd)
        self.addCleanup(os.unlink, path)
        if content is not None:
            with open(path, "w") as f:
                f.write(content)
        return path

    def _read(self, content="[foo]", instance=None, pidfile=None,
              logdir=None, **defaults):
        """
        Write *content* to a temporary config file and run read_config on
        it with the given options/defaults.

        Returns (config_path, settings) so tests can inspect both.
        """
        config = self.makeFile(content=content)
        settings = read_config(
            "carbon-foo",
            FakeOptions(config=config, instance=instance,
                        pidfile=pidfile, logdir=logdir),
            **defaults)
        return config, settings

    def test_root_dir_is_required(self):
        """
        At minimum, the caller must provide a 'ROOT_DIR' setting.
        """
        with self.assertRaises(CarbonConfigException) as ctx:
            read_config("carbon-foo", FakeOptions(config=None))
        self.assertEqual("Either ROOT_DIR or GRAPHITE_ROOT "
                         "needs to be provided.", str(ctx.exception))

    def test_config_is_not_required(self):
        """
        If the '--config' option is not provided, it defaults to
        ROOT_DIR/conf/carbon.conf.
        """
        root_dir = mkdtemp()
        self.addCleanup(rmtree, root_dir)
        conf_dir = join(root_dir, "conf")
        makedirs(conf_dir)
        self.makeFile(content="[foo]",
                      basename="carbon.conf",
                      dirname=conf_dir)
        options = FakeOptions(config=None, instance=None,
                              pidfile=None, logdir=None)
        read_config("carbon-foo", options, ROOT_DIR=root_dir)
        self.assertEqual(join(root_dir, "conf", "carbon.conf"),
                         options["config"])

    def test_config_dir_from_environment(self):
        """
        If the 'GRAPHITE_CONF_DIR' variable is set in the environment, then
        'CONF_DIR' will be set to that directory.
        """
        root_dir = mkdtemp()
        self.addCleanup(rmtree, root_dir)
        conf_dir = join(root_dir, "configs", "production")
        makedirs(conf_dir)
        self.makeFile(content="[foo]",
                      basename="carbon.conf",
                      dirname=conf_dir)
        # Restore (or remove) the environment variable after the test.
        orig_value = os.environ.get("GRAPHITE_CONF_DIR", None)
        if orig_value is not None:
            self.addCleanup(os.environ.__setitem__,
                            "GRAPHITE_CONF_DIR",
                            orig_value)
        else:
            self.addCleanup(os.environ.__delitem__, "GRAPHITE_CONF_DIR")
        os.environ["GRAPHITE_CONF_DIR"] = conf_dir
        settings = read_config("carbon-foo",
                               FakeOptions(config=None, instance=None,
                                           pidfile=None, logdir=None),
                               ROOT_DIR=root_dir)
        self.assertEqual(conf_dir, settings.CONF_DIR)

    def test_conf_dir_defaults_to_config_dirname(self):
        """
        The 'CONF_DIR' setting defaults to the parent directory of the
        provided configuration file.
        """
        config, settings = self._read(ROOT_DIR="foo")
        self.assertEqual(dirname(config), settings.CONF_DIR)

    def test_storage_dir_relative_to_root_dir(self):
        """
        The 'STORAGE_DIR' setting defaults to the 'storage' directory relative
        to the 'ROOT_DIR' setting.
        """
        _, settings = self._read(ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage"), settings.STORAGE_DIR)

    def test_log_dir_relative_to_storage_dir(self):
        """
        The 'LOG_DIR' setting defaults to a program-specific directory relative
        to the 'STORAGE_DIR' setting.
        """
        _, settings = self._read(ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage", "log", "carbon-foo"),
                         settings.LOG_DIR)

    def test_log_dir_relative_to_provided_storage_dir(self):
        """
        Providing a different 'STORAGE_DIR' in defaults overrides the default
        of being relative to 'ROOT_DIR'.
        """
        _, settings = self._read(ROOT_DIR="foo", STORAGE_DIR="bar")
        self.assertEqual(join("bar", "log", "carbon-foo"),
                         settings.LOG_DIR)

    def test_log_dir_for_instance_relative_to_storage_dir(self):
        """
        The 'LOG_DIR' setting defaults to a program-specific directory relative
        to the 'STORAGE_DIR' setting. In the case of an instance, the instance
        name is appended to the directory.
        """
        _, settings = self._read(instance="x", ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage", "log",
                              "carbon-foo", "carbon-foo-x"),
                         settings.LOG_DIR)

    def test_log_dir_for_instance_relative_to_provided_storage_dir(self):
        """
        Providing a different 'STORAGE_DIR' in defaults overrides the default
        of being relative to 'ROOT_DIR'. In the case of an instance, the
        instance name is appended to the directory.
        """
        _, settings = self._read(instance="x",
                                 ROOT_DIR="foo", STORAGE_DIR="bar")
        self.assertEqual(join("bar", "log", "carbon-foo", "carbon-foo-x"),
                         settings.LOG_DIR)

    def test_pidfile_relative_to_storage_dir(self):
        """
        The 'pidfile' setting defaults to a program-specific filename relative
        to the 'STORAGE_DIR' setting.
        """
        _, settings = self._read(ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage", "carbon-foo.pid"),
                         settings.pidfile)

    def test_pidfile_in_options_has_precedence(self):
        """
        The 'pidfile' option from command line overrides the default setting.
        """
        _, settings = self._read(pidfile="foo.pid", ROOT_DIR="foo")
        self.assertEqual("foo.pid", settings.pidfile)

    def test_pidfile_for_instance_in_options_has_precedence(self):
        """
        The 'pidfile' option from command line overrides the default setting
        for the instance, if one is specified.
        """
        _, settings = self._read(instance="x", pidfile="foo.pid",
                                 ROOT_DIR="foo")
        self.assertEqual("foo.pid", settings.pidfile)

    def test_storage_dir_as_provided(self):
        """
        Providing a 'STORAGE_DIR' in defaults overrides the root-relative
        default.
        """
        _, settings = self._read(ROOT_DIR="foo", STORAGE_DIR="bar")
        self.assertEqual("bar", settings.STORAGE_DIR)

    def test_log_dir_as_provided(self):
        """
        Providing a 'LOG_DIR' in defaults overrides the storage-relative
        default.
        """
        _, settings = self._read(ROOT_DIR="foo", STORAGE_DIR="bar",
                                 LOG_DIR='baz')
        self.assertEqual("baz", settings.LOG_DIR)

    def test_log_dir_from_options(self):
        """
        Providing a 'LOG_DIR' in the command line overrides the
        storage-relative default.
        """
        _, settings = self._read(logdir="baz", ROOT_DIR="foo")
        self.assertEqual("baz", settings.LOG_DIR)

    def test_log_dir_for_instance_from_options(self):
        """
        Providing a 'LOG_DIR' in the command line overrides the
        storage-relative default for the instance.
        """
        _, settings = self._read(instance="x", logdir="baz", ROOT_DIR="foo")
        self.assertEqual("baz", settings.LOG_DIR)

    def test_storage_dir_from_config(self):
        """
        Providing a 'STORAGE_DIR' in the configuration file overrides the
        root-relative default.
        """
        _, settings = self._read(content="[foo]\nSTORAGE_DIR = bar",
                                 ROOT_DIR="foo")
        self.assertEqual("bar", settings.STORAGE_DIR)

    def test_log_dir_from_config(self):
        """
        Providing a 'LOG_DIR' in the configuration file overrides the
        storage-relative default.
        """
        _, settings = self._read(content="[foo]\nLOG_DIR = baz",
                                 ROOT_DIR="foo")
        self.assertEqual("baz", settings.LOG_DIR)

    def test_log_dir_from_instance_config(self):
        """
        Providing a 'LOG_DIR' for the specific instance in the configuration
        file overrides the storage-relative default. The actual value will have
        the instance name appended to it and ends with a forward slash.
        """
        _, settings = self._read(content=("[foo]\nLOG_DIR = baz\n"
                                          "[foo:x]\nLOG_DIR = boo"),
                                 instance="x", ROOT_DIR="foo")
        self.assertEqual("boo/carbon-foo-x", settings.LOG_DIR)

    def test_pid_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'PID_DIR'
        """
        _, settings = self._read(content=("[foo]\n"
                                          "STORAGE_DIR = bar"),
                                 ROOT_DIR="foo")
        self.assertEqual("bar", settings.PID_DIR)

    def test_log_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'LOG_DIR'
        """
        _, settings = self._read(content=("[foo]\n"
                                          "STORAGE_DIR = bar"),
                                 ROOT_DIR="foo")
        self.assertEqual(join("bar", "log", "carbon-foo"), settings.LOG_DIR)

    def test_local_data_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'LOCAL_DATA_DIR'
        """
        _, settings = self._read(content=("[foo]\n"
                                          "STORAGE_DIR = bar"),
                                 ROOT_DIR="foo")
        self.assertEqual(join("bar", "whisper"), settings.LOCAL_DATA_DIR)

    def test_whitelists_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'WHITELISTS_DIR'
        """
        _, settings = self._read(content=("[foo]\n"
                                          "STORAGE_DIR = bar"),
                                 ROOT_DIR="foo")
        self.assertEqual(join("bar", "lists"), settings.WHITELISTS_DIR)
| |
"""
Go generator
"""
import ast
from .. import (
parse_eval_stmt,
prepare_expr
)
from .. import (
EvalStmt,
IfStmt,
ThenStmt,
ElseStmt,
ExecuteStmt,
)
from .base import JavaLikeGenerator
class GoGenerator(JavaLikeGenerator):
""" Go Generator """
bd_class = 'decimal.Decimal'
bd_class_constructor = 'decimal.NewFromInt'
list_const_parens = ('{', '}')
stmt_separator = ''
allow_constants = False
instance_var = 't'
def __init__(self, parser, outfile, class_name=None, indent=None, package_name='default'):
super(GoGenerator, self).__init__(parser, outfile, class_name, indent)
self.package_name = package_name
self.type_map = {} # Keep a list of known variable types
def generate(self):
wr = self.writer
self._write_comment("This file is automatically generated by LstGen, do not edit!", False)
wr.writeln('package {}'.format(self.package_name or 'tax'))
wr.nl()
wr.writeln('import ( "github.com/shopspring/decimal" )')
wr.nl()
# Define the model as a struct.
with self.writer.indent('type {} struct'.format(self.class_name)):
wr.writeln("// ------------------------ Constants -------------------------")
for const in self.parser.constants:
if const.comment is not None:
wr.nl()
self._write_comment(const.comment, False)
if const.type.endswith('[]'):
const.value = '[{}]'.format(const.value[1:-1])
thetype = self._convert_vartype(const.type)
wr.writeln('{const.name} {thetype}'.format(
const=const,
thetype=thetype
))
self.type_map[const.name] = thetype
for (comment, variables) in [
('Input variables', self.parser.input_vars),
('Output variables', self.parser.output_vars),
('Internal variables', self.parser.internal_vars),
]:
wr.nl()
wr.writeln('// {}'.format(comment))
wr.writeln("// ------------------------ Variables -------------------------")
for var in variables:
if var.comment is not None:
wr.nl()
self._write_comment(var.comment, False)
vartype = self._convert_vartype(var.type)
wr.writeln('{} {}'.format(var.name, vartype))
self.type_map[var.name] = vartype
# Define the New() method that also sets default values.
with self.writer.indent('func New{cls}() *{cls}'.format(cls=self.class_name)):
with self.writer.indent('return &{}'.format(self.class_name)):
wr.writeln("// ------------------------ Constants -------------------------")
for const in self.parser.constants:
if const.comment is not None:
wr.nl()
self._write_comment(const.comment, False)
if const.type.endswith('[]'):
const.value = '[{}]'.format(const.value[1:-1])
value = self.convert_to_go(const.value)
wr.writeln('{const.name}: {value},'.format(
const=const,
value=value
))
wr.nl()
wr.writeln("// ------------------------ Variables -------------------------")
for (comment, variables) in [
('Input variables', self.parser.input_vars),
('Output variables', self.parser.output_vars),
('Internal variables', self.parser.internal_vars),
]:
for var in variables:
if var.default is None:
continue
if var.comment is not None:
wr.nl()
self._write_comment(var.comment, False)
value = self.convert_to_go(var.default)
wr.writeln('{}: {},'.format(var.name, value))
# create setters for input vars
for var in self.parser.input_vars:
wr.nl()
signature = 'func ({instance} *{cls}) Set{cap}(value {type})'.format(
instance=self.instance_var,
cls=self.class_name,
cap=var.name.capitalize(),
type=self._convert_vartype(var.type)
)
with wr.indent(signature):
wr.writeln('{instance}.{name} = value'.format(
instance=self.instance_var,
name=var.name
))
# create getters for output vars
for var in self.parser.output_vars:
wr.nl()
signature = 'func ({instance} *{cls}) Get{cap}() {type}'.format(
instance=self.instance_var,
cls=self.class_name,
cap=var.name.capitalize(),
type=self._convert_vartype(var.type)
)
with wr.indent(signature):
wr.writeln('return {instance}.{name}'.format(
instance=self.instance_var,
name=var.name
))
self._write_method(self.parser.main_method)
for method in self.parser.methods:
self._write_method(method)
wr.nl()
def _convert_vartype(self, vartype):
return {
'BigDecimal.ZERO': 'decimal.NewFromInt(0)',
'BigDecimal[]': '[]'+self.bd_class,
'BigDecimal': ''+self.bd_class,
'int': 'int64',
'double': 'float64',
}[vartype]
def _conv_list(self, node):
res = super(GoGenerator, self)._conv_list(node)
return ['[]' + self.bd_class] + res
def _conv_attribute(self, node):
clsmethod = False
if node.attr == 'valueOf':
node.attr = 'NewFromInt'
clsmethod = True
elif node.attr == 'ZERO':
node.attr = 'NewFromInt(0)'
clsmethod = True
elif node.attr == 'ONE':
node.attr = 'NewFromInt(1)'
clsmethod = True
elif node.attr == 'TEN':
node.attr = 'NewFromInt(10)'
clsmethod = True
elif node.attr == 'longValue':
node.attr = 'IntPart'
elif node.attr == 'add':
node.attr = 'Add'
elif node.attr == 'subtract':
node.attr = 'Sub'
elif node.attr == 'multiply':
node.attr = 'Mul'
elif node.attr == 'divide':
node.attr = 'Div'
elif node.attr == 'compareTo':
node.attr = 'Cmp'
elif node.attr == 'setScale':
node.attr = 'Round'
elif node.attr in ('ROUND_UP', 'ROUND_DOWN'):
pass
else:
raise NotImplementedError("Unmapped attribute {}".format(node.attr))
if clsmethod:
return ['decimal', self.property_accessor_op, node.attr]
return (
self.to_code(node.value) +
[self.property_accessor_op, node.attr]
)
def _get_decimal_constructor_from_node(self, node):
try:
int(str(node.n))
return 'NewFromInt'
except ValueError:
return 'NewFromFloat'
except AttributeError: # If node.nodes[0] is not a value, but e.g. a name or a call
# Unfortunately it's not possible to support all cases due to the strict
# typing of Go. However, we are "lucky":
if node.__class__.__name__ == 'Name':
# For now, all variable assignments that create new values happen to be integers.
rtype = self.type_map.get(node.id)
if rtype == 'int64':
return 'NewFromInt'
elif rtype == 'float64':
return 'NewFromFloat'
raise NotImplementedError(node.id+' has unsupported type '+repr(rtype))
elif node.__class__.__name__ == 'Call':
# The node is a function call. Luckily so far, all calls return integers...
# luckily, the only function calls in assignments are calls to Decimal methods,
# which we can ensure to return int64
return 'NewFromInt'
elif node.__class__.__name__ == 'Constant':
try:
int(str(node.value))
return 'NewFromInt'
except ValueError:
return 'NewFromFloat'
elif node.__class__.__name__ == 'BinOp':
# For math operations, the type depends on the operators.
return self._get_decimal_constructor_from_node(node.left)
raise NotImplementedError("unsupported node type {}".format(
node.__class__.__name__
))
    def _conv_call(self, node):
        """Translate a Python ``Call`` AST node into a list of Go code tokens.

        Rewrites a few Decimal calls whose Go counterparts differ in name or
        arity, then renders ``caller(arg1, arg2, ...)``.
        """
        caller = self.to_code(node.func)
        # Fix calls to the Round function (Go's Decimal doesn't accept a rounding parameter,
        # so we need to map it to another function here.)
        if caller[-1] == 'Round' and len(node.args) > 1:
            # NOTE(review): node.args holds AST nodes, so comparing the last
            # element against the string 'ROUND_DOWN' looks like it can never
            # be true unless args are preprocessed upstream -- verify.
            if node.args[-1] == 'ROUND_DOWN':
                caller[-1] = 'RoundCash'
            # Drop the rounding-mode argument; Go's Round takes only places.
            node.args = node.args[:1]
        # Similarly for the Div function.
        elif caller[-1] == 'Div' and len(node.args) > 1:
            # FIXME: Div does not support ROUND_DOWN :-(
            caller[-1] = 'DivRound'
            node.args = node.args[:2]
        # Fix integer initialization-
        elif caller[-1] == 'NewFromInt':
            # Choose NewFromInt vs NewFromFloat based on the actual argument.
            caller[-1] = self._get_decimal_constructor_from_node(node.args[0])
        # Render the (possibly rewritten) argument list, comma-delimited.
        args = []
        for (idx, arg) in enumerate(node.args):
            args += self.to_code(arg)
            if idx != len(node.args) - 1:
                args.append(self.call_args_delim)
        return (
            caller +
            [self.callable_exec_parens[0]] +
            args +
            [self.callable_exec_parens[1]]
        )
def _write_method(self, method):
self.writer.nl()
if method.comment:
self._write_comment(method.comment, False)
signature = 'func ({instance} *{cls}) {name}()'.format(
instance=self.instance_var,
cls=self.class_name,
name=method.name
)
# actual method body
with self.writer.indent(signature):
self._write_stmt_body(method)
def convert_to_go(self, value):
""" Converts java pseudo code into valid java code """
tree = ast.parse(prepare_expr(value))
node = tree.body[0].value
return ''.join(self.to_code(node))
| |
import os
import environ
from django.utils import six
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('misrutasmx')
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment (e.g. env('DJANGO_SECRET_KEY')) before deploying.
# ------------------------------------------------------------------------------
SECRET_KEY = 'x*i5)0^idki5oct&q!+hto7%dwq4)2kb^-*ed3l=53e3k4$rp!'
# Debug
# ------------------------------------------------------------------------------
# NOTE(review): DEBUG must be False in production.
DEBUG = True
# Allowed Host
# ------------------------------------------------------------------------------
ALLOWED_HOSTS = []
# Application definition
# ------------------------------------------------------------------------------
# Core Django contrib applications.
DJANGO_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    'django.contrib.humanize',
    'django.contrib.sites',
)
# Reusable third-party applications.
THIRD_PARTY_APPS = (
    'crispy_forms', # Form layouts
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.facebook',
    'storages',
    'collectfast',
)
# Apps specific for this project go here.
LOCAL_APPS = (
    # Your stuff: custom apps go here
    'project.app',
    'django_facebook',
)
# NOTE(review): Facebook credentials are committed to source control; move
# them to the environment / var_production and rotate the secret.
FACEBOOK_APP_ID = '470579373130409'
FACEBOOK_APP_SECRET = '1ee7911efd6231d8bcc9a4e51aee14c2'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
# NOTE(review): MIDDLEWARE_CLASSES (and SessionAuthenticationMiddleware) are
# the pre-Django-1.10 style; newer Django versions use the MIDDLEWARE setting.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'config.urls'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Project- and facebook-specific context processors.
                'project.app.context_processors.globalvar',
                'django_facebook.context_processors.facebook',
            ],
        },
    },
]
# Required by django.contrib.sites / allauth.
SITE_ID = 1
# ALLAUTH CONFIGURATION
# ------------------------------------------------------------------------------
# http://django-allauth.readthedocs.org/en/latest
AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    'django.contrib.auth.backends.ModelBackend',
    # `allauth` specific authentication methods, such as login by e-mail
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'app.User'
LOGIN_REDIRECT_URL = 'index'
LOGIN_URL = 'landing'
SOCIALACCOUNT_QUERY_EMAIL = True
# Facebook OAuth2 provider configuration for django-allauth.
SOCIALACCOUNT_PROVIDERS = \
    {'facebook':
       {
        'METHOD': 'oauth2',
        'SCOPE':
           [
            'email', 'public_profile', 'user_friends'
           ],
        'AUTH_PARAMS':
           {
            'auth_type': 'reauthenticate'
           },
        'FIELDS':
           [
            'id',
            'email',
            'name',
            'first_name',
            'last_name',
            'verified',
            'locale',
            'timezone',
            'link',
            'gender',
            'updated_time'
           ],
        'EXCHANGE_TOKEN': True,
        # NOTE(review): 'path.to.callable' is a placeholder dotted path --
        # replace with a real locale callable or remove.
        'LOCALE_FUNC': 'path.to.callable',
        'VERIFIED_EMAIL': False,
        'VERSION': 'v2.4'
       }
    }
# CRISPY TEMPLATE
# ------------------------------------------------------------------------------
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
WSGI_APPLICATION = 'config.wsgi.application'
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# NOTE: the sqlite3 backend ignores USER/PASSWORD/HOST/PORT.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'misrutasmx.sqlite3',
        'USER': 'eadmailmx',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '',
    }
}
# Wrap every request in a transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Internationalization
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-MX'
TIME_ZONE = 'America/Mexico_City'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
MEDIA_URL = '/media/'
# Mail settings
# ------------------------------------------------------------------------------
# NOTE(review): live SMTP (Amazon SES) credentials are committed below; revoke
# and rotate them, then load them from the environment.
DEFAULT_FROM_EMAIL = 'MisRutasMX <sistemas@e-admin.mx>' #CHANGEME!!!#
EMAIL_HOST = 'email-smtp.us-east-1.amazonaws.com' #CHANGEME!!!#
EMAIL_HOST_USER = 'AKIAJ2P4EII4LYULTB4Q' #CHANGEME!!!#
EMAIL_HOST_PASSWORD = 'Agws0vyhjAsulzVNQsNb+LueMx4AEGYw/jPZyEMTtkQD' #CHANGEME!!!#
EMAIL_PORT = 587 #CHANGEME!!!#
EMAIL_USE_TLS = True #CHANGEME!!!#
EMAIL_SUBJECT_PREFIX = '[MisRutasMX]' #CHANGEME!!!#
# Celery
# ------------------------------------------------------------------------------
#INSTALLED_APPS += ('project.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = 'django://'
# LOCAL CONFIGURATION
# ------------------------------------------------------------------------------
# In-memory cache; per-process, suitable for development only.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
INSTALLED_APPS += ('django_extensions', )
# SLUGLIFIER
# ------------------------------------------------------------------------------
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# VARIABLES
# ------------------------------------------------------------------------------
# Site branding strings exposed via the globalvar context processor.
APPTITLE = 'MisRutas.MX'
TEXTTITLE = 'MisRutas.MX'
# Errors from django.request and django_facebook.models are mailed to admins
# (only when DEBUG is False) and echoed to the console.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins','console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django_facebook.models': {
            'handlers': ['mail_admins','console'],
            'level': 'ERROR',
            'propagate': True,
        }
    }
}
# PRODUCTION OVERRIDES
# ------------------------------------------------------------------------------
# Best-effort: when .var_production is importable, switch media/static storage
# to S3 and pull credentials from it; otherwise keep the local settings above.
# Catching Exception (instead of a bare ``except:``) preserves the best-effort
# behavior but no longer swallows SystemExit/KeyboardInterrupt.
try:
    from .var_production import *
    from boto.s3.connection import OrdinaryCallingFormat
    # See: http://django-storages.readthedocs.org/en/latest/index.html
    # Static and Media Storage
    AWS_ACCESS_KEY_ID = VAR_DJANGO_AWS_ACCESS_KEY_ID
    AWS_SECRET_ACCESS_KEY = VAR_AWS_SECRET_ACCESS_KEY
    AWS_STORAGE_BUCKET_NAME = VAR_AWS_STORAGE_BUCKET_NAME
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False
    AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
    # AWS cache settings, don't change unless you know what you're doing:
    AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
    # TODO See: https://github.com/jschneier/django-storages/issues/47
    # Revert the following and use str after the above-mentioned bug is fixed in
    # either django-storage-redux or boto
    AWS_HEADERS = {
        'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
            AWS_EXPIRY, AWS_EXPIRY))
    }
    # URL that handles the media served from MEDIA_ROOT, used for managing stored files.
    DEFAULT_FILE_STORAGE = 'config.s3utils.MediaRootS3BotoStorage'
    MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    MEDIA_ROOT = str(ROOT_DIR('misrutasmx/media'))
    # Static Assests
    # ------------------------
    STATICFILES_STORAGE = 'config.s3utils.StaticRootS3BotoStorage'
    STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
    # See: https://github.com/antonagestam/collectfast
    # For Django 1.7+, 'collectfast' should come before 'django.contrib.staticfiles'
    AWS_PRELOAD_METADATA = True
    FACEBOOK_APP_ID = VAR_FACEBOOK_APP_ID
    FACEBOOK_APP_SECRET = VAR_FACEBOOK_APP_SECRET
except Exception:
    # Production variables are unavailable (e.g. local development); fall back
    # to the defaults defined above.
    pass
| |
from ..lm import DomainLMResource, GigawordLMResource
from ..data import Resource, get_resource_manager
from ..sc import SCChunkResource, IdfResource
from ..pipeline import (
TfIdfExtractor, LMProbExtractor, BasicFeaturesExtractor,
QueryFeaturesExtractor, GeoFeaturesExtractor
)
import os
from cuttsum.detector import ArticleDetector
import streamcorpus as sc
import re
import pandas as pd
import numpy as np
from itertools import izip
import signal
import Queue
import cuttsum.srilm
import gzip
from datetime import datetime, timedelta
class ArticlesResource(Resource):
    """Stores hourly streamcorpus chunks filtered down to relevant articles.

    For each event hour, raw corpus chunks are reduced (by a worker) to the
    stream items that match the event and pass the article detector; results
    are written under ``$TREC_DATA/articles/<event>/``.
    """
    def __init__(self):
        Resource.__init__(self)
        # Root directory for filtered article chunks; created on demand.
        self.dir_ = os.path.join(
            os.getenv(u'TREC_DATA', u'.'), u'articles')
        if not os.path.exists(self.dir_):
            os.makedirs(self.dir_)
    def check_coverage(self, event, corpus, **kwargs):
        """Return the fraction of event hours whose chunk file exists on disk."""
        data_dir = os.path.join(self.dir_, event.fs_name())
        hours = event.list_event_hours()
        n_hours = len(hours)
        n_covered = 0
        for hour in hours:
            path = os.path.join(data_dir, '{}.sc.gz'.format(
                hour.strftime(u'%Y-%m-%d-%H')))
            if os.path.exists(path):
                n_covered += 1
        # Guard against division by zero for events without hours.
        if n_hours == 0:
            return 0
        else:
            return n_covered / float(n_hours)
    def get_chunk_path(self, event, hour):
        """Return the output chunk path for *event* at *hour*."""
        data_dir = os.path.join(self.dir_, event.fs_name())
        return os.path.join(data_dir, u'{}.sc.gz'.format(
            hour.strftime(u'%Y-%m-%d-%H')))
    def get(self, event, corpus, **kwargs):
        """Generate any missing hourly article chunks for *event*.

        Recognized kwargs: ``overwrite`` (bool, default False), ``n_procs``
        (int, default 1) and ``progress_bar`` (bool, default False).
        """
        data_dir = os.path.join(self.dir_, event.fs_name())
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        chunks = SCChunkResource()
        overwrite = kwargs.get(u'overwrite', False)
        hours = event.list_event_hours()
        jobs = []
        for hour in hours:
            path = self.get_chunk_path(event, hour)
            # Skip hours whose output already exists unless overwriting.
            if os.path.exists(path) and overwrite is False:
                continue
            chunk_paths = chunks.get_chunks_for_hour(hour, corpus)
            jobs.append((path, chunk_paths))
        n_procs = kwargs.get(u'n_procs', 1)
        progress_bar = kwargs.get(u'progress_bar', False)
        self.do_work(_article_resource_worker, jobs, n_procs, progress_bar,
                     event=event, corpus=corpus)
    def dependencies(self):
        """Resources that must be generated before this one."""
        return tuple(['SCChunkResource'])
    def __unicode__(self):
        return u"cuttsum.pipeline.ArticlesResource"
def _article_resource_worker(job_queue, result_queue, **kwargs):
    """Multiprocessing worker: filter corpus chunks down to event articles.

    Pulls ``(output_path, chunk_paths)`` jobs from *job_queue*, writes a
    filtered streamcorpus chunk per job, and puts ``None`` on *result_queue*
    when a job completes.  Expects ``event`` and ``corpus`` in kwargs.
    """
    # Ignore SIGINT so the parent process alone handles shutdown.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    event = kwargs.get(u'event')
    corpus = kwargs.get(u'corpus')
    while not job_queue.empty():
        try:
            opath, chunk_paths = job_queue.get(block=False)
            artcl_detect = ArticleDetector(event)
            patt = event.regex_pattern()
            with sc.Chunk(path=opath, mode='wb', message=corpus.sc_msg()) as ochunk:
                for path in chunk_paths:
                    for si in sc.Chunk(path=path, message=corpus.sc_msg()):
                        if si.body.clean_visible is None:
                            continue
                        # BUG FIX: a compiled pattern's .search() takes
                        # (string, pos, endpos) -- passing re.I (== 2) as the
                        # second argument silently skipped the first two
                        # characters instead of enabling case-insensitivity.
                        # Flags must be supplied when the pattern is compiled
                        # (in event.regex_pattern()).
                        elif patt.search(si.body.clean_visible):
                            sentences = corpus.get_sentences(si)
                            sent_idxs = artcl_detect.find_articles(
                                sentences)
                            if len(sent_idxs) > 0:
                                rel_sents = []
                                for sent_idx in sent_idxs:
                                    rel_sents.append(sentences[sent_idx])
                                # Stash the relevant sentences under a
                                # dedicated annotator key.
                                si.body.sentences[u'article-clf'] = rel_sents
                                ochunk.add(si)
            result_queue.put(None)
        except Queue.Empty:
            # Another worker drained the queue between empty() and get().
            pass
class SentenceFeaturesResource(Resource):
    """Computes per-sentence feature tables (tsv.gz) for each event hour.

    Combines basic, query, language-model, tf-idf and geo features extracted
    from the SentenceStringsResource output.  Results live under
    ``$TREC_DATA/sentence-features/<event>/``.
    """
    def __init__(self):
        Resource.__init__(self)
        # Root directory for per-hour feature tables; created on demand.
        self.dir_ = os.path.join(
            os.getenv(u'TREC_DATA', u'.'), u'sentence-features')
        if not os.path.exists(self.dir_):
            os.makedirs(self.dir_)
    def dependencies(self):
        """Resources that must be generated before this one."""
        return tuple([u'DomainLMResource', u'IdfResource',
                      u'ArticlesResource', u'GigawordLMResource',
                      u'SentenceStringsResource', u'GeoClustersResource'])
    def __unicode__(self):
        return u"cuttsum.pipeline.SentenceFeaturesResource"
    def get_tsv_path(self, event, hour):
        """Return the feature-table path for *event* at *hour*."""
        data_dir = os.path.join(self.dir_, event.fs_name())
        return os.path.join(data_dir, u'{}.tsv.gz'.format(
            hour.strftime(u'%Y-%m-%d-%H')))
    def get_dataframe(self, event, hour):
        """Load the feature table as a DataFrame, or None if not generated."""
        tsv = self.get_tsv_path(event, hour)
        if not os.path.exists(tsv):
            return None
        else:
            with gzip.open(tsv, u'r') as f:
                df = pd.io.parsers.read_csv(
                    f, sep='\t', quoting=3, header=0)
            return df
    def check_coverage(self, event, corpus, **kwargs):
        """Return the fraction of hours with sentence strings that also have features."""
        n_hours = 0
        n_covered = 0
        strings = get_resource_manager(u'SentenceStringsResource')
        for hour in event.list_event_hours():
            # Only hours that have input strings count toward coverage.
            if os.path.exists(strings.get_tsv_path(event, hour)):
                n_hours += 1
                if os.path.exists(self.get_tsv_path(event, hour)):
                    n_covered += 1
        if n_hours == 0:
            return 0
        else:
            return n_covered / float(n_hours)
    def get(self, event, corpus, overwrite=False, n_procs=1,
            progress_bar=False, preroll=0, **kwargs):
        """Generate missing feature tables for *event*.

        Side effect: starts the domain and gigaword SRILM servers if they are
        not already running, since the workers query them over their ports.
        ``preroll`` is the number of trailing hourly idf tables to use.
        """
        strings = get_resource_manager(u'SentenceStringsResource')
        data_dir = os.path.join(self.dir_, event.fs_name())
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        jobs = []
        for hour in event.list_event_hours():
            string_tsv_path = strings.get_tsv_path(event, hour)
            feature_tsv_path = self.get_tsv_path(event, hour)
            if os.path.exists(string_tsv_path):
                if overwrite is True or not os.path.exists(feature_tsv_path):
                    jobs.append((string_tsv_path, hour, feature_tsv_path))
        domainlm = get_resource_manager(u'DomainLMResource')
        domainlm_port = domainlm.get_port(event)
        gigawordlm = get_resource_manager(u'GigawordLMResource')
        gigawordlm_port = gigawordlm.get_gigaword_port()
        # Ensure both language-model servers are up before spawning workers.
        if not cuttsum.srilm.check_status(domainlm_port):
            print u"starting domain.lm..."
            cuttsum.srilm.start_lm(
                domainlm.get_arpa_path(event), 3, domainlm_port)
        if not cuttsum.srilm.check_status(gigawordlm_port):
            print u"starting gigaword.lm..."
            cuttsum.srilm.start_lm(
                gigawordlm.get_arpa_path(), 5, gigawordlm_port)
        self.do_work(sentencefeature_worker_, jobs, n_procs, progress_bar,
                     event=event, corpus=corpus, preroll=preroll)
def sentencefeature_worker_(job_queue, result_queue, **kwargs):
    """Multiprocessing worker: build per-sentence feature tables.

    Pulls ``(string_tsv_path, hour, feature_tsv_path)`` jobs from *job_queue*,
    extracts basic/query/LM/tf-idf/geo features per sentence, and writes one
    tab-separated table per job.  Expects ``event``, ``corpus`` and
    ``preroll`` in kwargs.  Puts ``None`` on *result_queue* per finished job.
    """
    # Ignore SIGINT so the parent process alone handles shutdown.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    event = kwargs.get(u'event')
    corpus = kwargs.get(u'corpus')
    preroll = kwargs.get(u'preroll')
    # Extractors shared across all jobs handled by this worker.
    basic_ext = BasicFeaturesExtractor()
    geocache = get_resource_manager(u'GeoCacheResource')
    geocluster = get_resource_manager(u'GeoClustersResource')
    query_ext = QueryFeaturesExtractor(event)
    domainlm = get_resource_manager(u'DomainLMResource')
    domainlm_port = domainlm.get_port(event)
    gigawordlm = get_resource_manager(u'GigawordLMResource')
    gigawordlm_port = gigawordlm.get_gigaword_port()
    # Queries the SRILM servers started by SentenceFeaturesResource.get().
    lm_ext = LMProbExtractor(domainlm_port, 3, gigawordlm_port, 5)
    idfs = get_resource_manager(u'IdfResource')
    def get_idf_paths(hour):
        # The idf table for this hour plus the preceding preroll-1 hours.
        return [idfs.get_idf_path(hour - timedelta(hours=i), corpus)
                for i in range(preroll)]
    def get_geo_cluster_paths(hour):
        # Geo clusters for the current and the previous hour.
        return [geocluster.get_tsv_path(event, hour),
                geocluster.get_tsv_path(event, hour - timedelta(hours=1))]
    while not job_queue.empty():
        try:
            string_tsv_path, hour, feature_tsv_path = \
                job_queue.get(block=False)
            # tf-idf and geo extractors are hour-specific.
            idf_paths = get_idf_paths(hour)
            tfidf_ext = TfIdfExtractor(idf_paths[0], idf_paths[1:])
            geo_ext = GeoFeaturesExtractor(
                geocache.get_tsv_path(),
                get_geo_cluster_paths(hour))
            with gzip.open(string_tsv_path, u'r') as f:
                string_df = pd.io.parsers.read_csv(
                    f, sep='\t', quoting=3, header=0)
            feature_maps = []
            # Sentences are processed one document (stream item) at a time.
            articles = string_df.groupby(u'stream id')
            for name, article in articles:
                #for index, sentence in article.iterrows():
                #times = map(lambda x: x.split('-')[0], article[u'stream id']
                # Stream ids begin with a unix timestamp, e.g. "1234567890-...".
                doc_time = datetime.utcfromtimestamp(int(name.split('-')[0]))
                since_start = doc_time - event.start
                cnlp_strings = article[u'corenlp'].tolist()
                sc_strings = article[u'streamcorpus'].tolist()
                geo_strings = article[u'locations'].tolist()
                query_feats = query_ext.process_streamcorpus_strings(
                    sc_strings)
                geo_feats = geo_ext.process_geo_strings(geo_strings)
                tfidf_feats = tfidf_ext.process_streamcorpus_strings(
                    sc_strings, since_start.total_seconds())
                lm_feats = lm_ext.process_corenlp_strings(cnlp_strings)
                basic_feats = basic_ext.process_sentences(
                    sc_strings, cnlp_strings)
                # Merge the per-extractor dicts into one row per sentence.
                for index, (_, sentence) in enumerate(article.iterrows()):
                    #assert len(tfidf_feats[index]) == preroll
                    feature_map = {u'stream id': sentence[u'stream id'],
                                   u'sentence id': sentence[u'sentence id']}
                    feature_map.update(basic_feats[index].iteritems())
                    feature_map.update(lm_feats[index].iteritems())
                    feature_map.update(tfidf_feats[index].iteritems())
                    feature_map.update(query_feats[index].iteritems())
                    feature_map.update(geo_feats[index].iteritems())
                    feature_maps.append(feature_map)
            # Fix the column order so tables are comparable across hours.
            columns = [u'stream id', u'sentence id'] \
                + basic_ext.features + query_ext.features \
                + lm_ext.features + geo_ext.features \
                + tfidf_ext.features
            df = pd.DataFrame(feature_maps, columns=columns)
            #for i, x in enumerate(df.iloc[0]):
            #    print df.columns[i], x
            #print np.all(pd.notnull(df))
            #print pd.isnull(df).any(1).nonzero()[0]
            # Every feature must be filled in; missing values are a bug.
            assert np.all(pd.notnull(df))
            with gzip.open(feature_tsv_path, u'w') as f:
                df.to_csv(f, sep='\t', index=False, index_label=False,
                          na_rep='nan')
            result_queue.put(None)
        except Queue.Empty:
            # Another worker drained the queue between empty() and get().
            pass
    return True
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from six.moves import http_client
from keystone import catalog
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit import rest
BASE_URL = 'http://127.0.0.1:35357/v2'
# Sentinel default meaning "use the service created in setUp"; lets callers
# pass service_id=None explicitly to test the null case.
SERVICE_FIXTURE = object()
class V2CatalogTestCase(rest.RestfulTestCase):
    """REST tests for the v2.0 endpoint CRUD API backed by the SQL catalog."""
    def setUp(self):
        super(V2CatalogTestCase, self).setUp()
        self.useFixture(database.Database())
        # One service fixture shared by most tests.
        self.service_id = uuid.uuid4().hex
        self.service = unit.new_service_ref()
        self.service['id'] = self.service_id
        self.catalog_api.create_service(
            self.service_id,
            self.service.copy())
        # TODO(termie): add an admin user to the fixtures and use that user
        # override the fixtures, for now
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_bar['id'],
            self.role_admin['id'])
    def config_overrides(self):
        super(V2CatalogTestCase, self).config_overrides()
        self.config_fixture.config(group='catalog', driver='sql')
    def _get_token_id(self, r):
        """Applicable only to JSON."""
        return r.result['access']['token']['id']
    def _endpoint_create(self, expected_status=200, service_id=SERVICE_FIXTURE,
                         publicurl='http://localhost:8080',
                         internalurl='http://localhost:8080',
                         adminurl='http://localhost:8080'):
        """POST /v2.0/endpoints and return (request_body, response)."""
        if service_id is SERVICE_FIXTURE:
            service_id = self.service_id
        # FIXME(dolph): expected status should actually be 201 Created
        path = '/v2.0/endpoints'
        body = {
            'endpoint': {
                'adminurl': adminurl,
                'service_id': service_id,
                'region': 'RegionOne',
                'internalurl': internalurl,
                'publicurl': publicurl
            }
        }
        r = self.admin_request(method='POST', token=self.get_scoped_token(),
                               path=path, expected_status=expected_status,
                               body=body)
        return body, r
    def _region_create(self):
        """Create a region with a random id and return that id."""
        region_id = uuid.uuid4().hex
        self.catalog_api.create_region({'id': region_id})
        return region_id
    def _service_create(self):
        """Create a service with a random id and return that id."""
        service_id = uuid.uuid4().hex
        service = unit.new_service_ref()
        service['id'] = service_id
        self.catalog_api.create_service(service_id, service)
        return service_id
    def test_endpoint_create(self):
        """A created endpoint is echoed back with an id."""
        req_body, response = self._endpoint_create()
        self.assertIn('endpoint', response.result)
        self.assertIn('id', response.result['endpoint'])
        for field, value in req_body['endpoint'].items():
            self.assertEqual(response.result['endpoint'][field], value)
    def test_pure_v3_endpoint_with_publicurl_visible_from_v2(self):
        """Test pure v3 endpoint can be fetched via v2 API.
        For those who are using v2 APIs, endpoints created by v3 API should
        also be visible as there are no differences about the endpoints
        except the format or the internal implementation.
        And because public url is required for v2 API, so only the v3 endpoints
        of the service which has the public interface endpoint will be
        converted into v2 endpoints.
        """
        region_id = self._region_create()
        service_id = self._service_create()
        # create a v3 endpoint with three interfaces
        body = {
            'endpoint': unit.new_endpoint_ref(service_id,
                                              default_region_id=region_id)
        }
        for interface in catalog.controllers.INTERFACES:
            body['endpoint']['interface'] = interface
            self.admin_request(method='POST',
                               token=self.get_scoped_token(),
                               path='/v3/endpoints',
                               expected_status=http_client.CREATED,
                               body=body)
        r = self.admin_request(token=self.get_scoped_token(),
                               path='/v2.0/endpoints')
        # v3 endpoints having public url can be fetched via v2.0 API
        self.assertEqual(1, len(r.result['endpoints']))
        v2_endpoint = r.result['endpoints'][0]
        self.assertEqual(service_id, v2_endpoint['service_id'])
        # check urls just in case.
        # This is not the focus of this test, so no different urls are used.
        self.assertEqual(body['endpoint']['url'], v2_endpoint['publicurl'])
        self.assertEqual(body['endpoint']['url'], v2_endpoint['adminurl'])
        self.assertEqual(body['endpoint']['url'], v2_endpoint['internalurl'])
        self.assertNotIn('name', v2_endpoint)
        v3_endpoint = self.catalog_api.get_endpoint(v2_endpoint['id'])
        # it's the v3 public endpoint's id as the generated v2 endpoint
        self.assertEqual('public', v3_endpoint['interface'])
        self.assertEqual(service_id, v3_endpoint['service_id'])
    def test_pure_v3_endpoint_without_publicurl_invisible_from_v2(self):
        """Test pure v3 endpoint without public url can't be fetched via v2 API.
        V2 API will return endpoints created by v3 API, but because public url
        is required for v2 API, so v3 endpoints without public url will be
        ignored.
        """
        region_id = self._region_create()
        service_id = self._service_create()
        # create a v3 endpoint without public interface
        body = {
            'endpoint': unit.new_endpoint_ref(service_id,
                                              default_region_id=region_id)
        }
        for interface in catalog.controllers.INTERFACES:
            if interface == 'public':
                continue
            body['endpoint']['interface'] = interface
            self.admin_request(method='POST',
                               token=self.get_scoped_token(),
                               path='/v3/endpoints',
                               expected_status=http_client.CREATED,
                               body=body)
        r = self.admin_request(token=self.get_scoped_token(),
                               path='/v2.0/endpoints')
        # v3 endpoints without public url won't be fetched via v2.0 API
        self.assertEqual(0, len(r.result['endpoints']))
    def test_endpoint_create_with_null_adminurl(self):
        """A null adminurl is accepted but omitted from the response."""
        req_body, response = self._endpoint_create(adminurl=None)
        self.assertIsNone(req_body['endpoint']['adminurl'])
        self.assertNotIn('adminurl', response.result['endpoint'])
    def test_endpoint_create_with_empty_adminurl(self):
        """An empty adminurl is accepted but omitted from the response."""
        req_body, response = self._endpoint_create(adminurl='')
        self.assertEqual('', req_body['endpoint']['adminurl'])
        self.assertNotIn("adminurl", response.result['endpoint'])
    def test_endpoint_create_with_null_internalurl(self):
        """A null internalurl is accepted but omitted from the response."""
        req_body, response = self._endpoint_create(internalurl=None)
        self.assertIsNone(req_body['endpoint']['internalurl'])
        self.assertNotIn('internalurl', response.result['endpoint'])
    def test_endpoint_create_with_empty_internalurl(self):
        """An empty internalurl is accepted but omitted from the response."""
        req_body, response = self._endpoint_create(internalurl='')
        self.assertEqual('', req_body['endpoint']['internalurl'])
        self.assertNotIn("internalurl", response.result['endpoint'])
    def test_endpoint_create_with_null_publicurl(self):
        """publicurl is mandatory for v2: null is rejected."""
        self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                              publicurl=None)
    def test_endpoint_create_with_empty_publicurl(self):
        """publicurl is mandatory for v2: empty string is rejected."""
        self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                              publicurl='')
    def test_endpoint_create_with_null_service_id(self):
        self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                              service_id=None)
    def test_endpoint_create_with_empty_service_id(self):
        self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                              service_id='')
    def test_endpoint_create_with_valid_url(self):
        """Create endpoint with valid URL should be tested, too."""
        # list one valid url is enough, no need to list too much
        valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
        # baseline tests that all valid URLs works
        self._endpoint_create(expected_status=200,
                              publicurl=valid_url,
                              internalurl=valid_url,
                              adminurl=valid_url)
    def test_endpoint_create_with_invalid_url(self):
        """Test the invalid cases: substitutions is not exactly right."""
        invalid_urls = [
            # using a substitution that is not whitelisted - KeyError
            'http://127.0.0.1:8774/v1.1/$(nonexistent)s',
            # invalid formatting - ValueError
            'http://127.0.0.1:8774/v1.1/$(tenant_id)',
            'http://127.0.0.1:8774/v1.1/$(tenant_id)t',
            'http://127.0.0.1:8774/v1.1/$(tenant_id',
            # invalid type specifier - TypeError
            # admin_url is a string not an int
            'http://127.0.0.1:8774/v1.1/$(admin_url)d',
        ]
        # list one valid url is enough, no need to list too much
        valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
        # Case one: publicurl, internalurl and adminurl are
        # all invalid
        for invalid_url in invalid_urls:
            self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                                  publicurl=invalid_url,
                                  internalurl=invalid_url,
                                  adminurl=invalid_url)
        # Case two: publicurl, internalurl are invalid
        # and adminurl is valid
        for invalid_url in invalid_urls:
            self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                                  publicurl=invalid_url,
                                  internalurl=invalid_url,
                                  adminurl=valid_url)
        # Case three: publicurl, adminurl are invalid
        # and internalurl is valid
        for invalid_url in invalid_urls:
            self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                                  publicurl=invalid_url,
                                  internalurl=valid_url,
                                  adminurl=invalid_url)
        # Case four: internalurl, adminurl are invalid
        # and publicurl is valid
        for invalid_url in invalid_urls:
            self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                                  publicurl=valid_url,
                                  internalurl=invalid_url,
                                  adminurl=invalid_url)
        # Case five: publicurl is invalid, internalurl
        # and adminurl are valid
        for invalid_url in invalid_urls:
            self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                                  publicurl=invalid_url,
                                  internalurl=valid_url,
                                  adminurl=valid_url)
        # Case six: internalurl is invalid, publicurl
        # and adminurl are valid
        for invalid_url in invalid_urls:
            self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                                  publicurl=valid_url,
                                  internalurl=invalid_url,
                                  adminurl=valid_url)
        # Case seven: adminurl is invalid, publicurl
        # and internalurl are valid
        for invalid_url in invalid_urls:
            self._endpoint_create(expected_status=http_client.BAD_REQUEST,
                                  publicurl=valid_url,
                                  internalurl=valid_url,
                                  adminurl=invalid_url)
class TestV2CatalogAPISQL(unit.TestCase):
    """Direct catalog-manager tests (no HTTP layer) against the SQL driver."""
    def setUp(self):
        super(TestV2CatalogAPISQL, self).setUp()
        self.useFixture(database.Database())
        self.catalog_api = catalog.Manager()
        # One service with a single endpoint as the baseline fixture.
        self.service_id = uuid.uuid4().hex
        service = {'id': self.service_id, 'name': uuid.uuid4().hex}
        self.catalog_api.create_service(self.service_id, service)
        endpoint = self.new_endpoint_ref(service_id=self.service_id)
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
    def config_overrides(self):
        super(TestV2CatalogAPISQL, self).config_overrides()
        self.config_fixture.config(group='catalog', driver='sql')
    def new_endpoint_ref(self, service_id):
        """Return a randomized endpoint reference for *service_id*."""
        return {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex,
            'interface': uuid.uuid4().hex[:8],
            'service_id': service_id,
            'url': uuid.uuid4().hex,
            'region': uuid.uuid4().hex,
        }
    def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
        user_id = uuid.uuid4().hex
        tenant_id = uuid.uuid4().hex
        # the only endpoint in the catalog is the one created in setUp
        # NOTE(review): the local name `catalog` shadows the imported
        # keystone.catalog module within this method body.
        catalog = self.catalog_api.get_catalog(user_id, tenant_id)
        self.assertEqual(1, len(catalog))
        # it's also the only endpoint in the backend
        self.assertEqual(1, len(self.catalog_api.list_endpoints()))
        # create a new, invalid endpoint - malformed type declaration
        endpoint = self.new_endpoint_ref(self.service_id)
        endpoint['url'] = 'http://keystone/%(tenant_id)'
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
        # create a new, invalid endpoint - nonexistent key
        endpoint = self.new_endpoint_ref(self.service_id)
        endpoint['url'] = 'http://keystone/%(you_wont_find_me)s'
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
        # verify that the invalid endpoints don't appear in the catalog
        catalog = self.catalog_api.get_catalog(user_id, tenant_id)
        self.assertEqual(1, len(catalog))
        # all three endpoints appear in the backend
        self.assertEqual(3, len(self.catalog_api.list_endpoints()))
    def test_get_catalog_always_returns_service_name(self):
        user_id = uuid.uuid4().hex
        tenant_id = uuid.uuid4().hex
        # create a service, with a name
        named_svc = {
            'id': uuid.uuid4().hex,
            'type': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
        }
        self.catalog_api.create_service(named_svc['id'], named_svc)
        endpoint = self.new_endpoint_ref(service_id=named_svc['id'])
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
        # create a service, with no name
        unnamed_svc = {
            'id': uuid.uuid4().hex,
            'type': uuid.uuid4().hex
        }
        self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc)
        endpoint = self.new_endpoint_ref(service_id=unnamed_svc['id'])
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
        region = None
        catalog = self.catalog_api.get_catalog(user_id, tenant_id)
        # a named service keeps its name; an unnamed one gets ''
        self.assertEqual(named_svc['name'],
                         catalog[region][named_svc['type']]['name'])
        self.assertEqual('', catalog[region][unnamed_svc['type']]['name'])
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import re
import shutil
import socket
import StringIO
import sys
import netaddr
from oslo.config import cfg
from quantum.agent.linux import ip_lib
from quantum.agent.linux import utils
from quantum.openstack.common import jsonutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import uuidutils
LOG = logging.getLogger(__name__)

# Config options exposed by this module; the consuming agent registers
# them via oslo.config.
OPTS = [
    cfg.StrOpt('dhcp_confs',
               default='$state_path/dhcp',
               help=_('Location to store DHCP server config files')),
    cfg.IntOpt('dhcp_lease_time',
               default=120,
               help=_('Lifetime of a DHCP lease in seconds')),
    cfg.StrOpt('dhcp_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames')),
    cfg.StrOpt('dnsmasq_config_file',
               default='',
               help=_('Override the default dnsmasq settings with this file')),
    cfg.StrOpt('dnsmasq_dns_server',
               help=_('Use another DNS server before any in '
                      '/etc/resolv.conf.')),
]

# IP protocol versions, used as keys into the driver PORTS maps.
IPV4 = 4
IPV6 = 6
# Transport protocol labels used in the PORTS maps.
UDP = 'udp'
TCP = 'tcp'
# Well-known ports a DHCP/DNS server listens on.
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
# Link-local address of the metadata service.
METADATA_DEFAULT_IP = '169.254.169.254'
class DhcpBase(object):
    """Abstract interface for per-network DHCP server drivers."""

    __metaclass__ = abc.ABCMeta

    def __init__(self, conf, network, root_helper='sudo',
                 device_delegate=None, namespace=None, version=None):
        # conf: oslo.config options; network: the network object served.
        # device_delegate sets up/tears down the DHCP port's interface;
        # namespace is an optional network namespace to run in;
        # version is the DHCP server's version (used by subclasses).
        self.conf = conf
        self.network = network
        self.root_helper = root_helper
        self.device_delegate = device_delegate
        self.namespace = namespace
        self.version = version

    @abc.abstractmethod
    def enable(self):
        """Enables DHCP for this network."""

    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Disable dhcp for this network."""

    def restart(self):
        """Restart the dhcp service for the network."""
        # Keep the port so the interface survives the bounce.
        self.disable(retain_port=True)
        self.enable()

    @abc.abstractproperty
    def active(self):
        """Boolean representing the running state of the DHCP server."""

    @abc.abstractmethod
    def reload_allocations(self):
        """Force the DHCP server to reload the assignment database."""

    @classmethod
    def existing_dhcp_networks(cls, conf, root_helper):
        """Return a list of existing networks ids that we have configs for."""
        # Optional hook: subclasses may override; base raises.
        raise NotImplementedError

    @classmethod
    def check_version(cls):
        """Execute version checks on DHCP server."""
        # Optional hook: subclasses may override; base raises.
        raise NotImplementedError
class DhcpLocalProcess(DhcpBase):
    """Base for drivers that run one local DHCP server process per network.

    Per-network state (pid, interface name) is kept in small files under
    <conf.dhcp_confs>/<network_id>/.
    """

    PORTS = []

    def _enable_dhcp(self):
        """check if there is a subnet within the network with dhcp enabled."""
        for subnet in self.network.subnets:
            if subnet.enable_dhcp:
                return True
        return False

    def enable(self):
        """Enables DHCP for this network by spawning a local process."""
        interface_name = self.device_delegate.setup(self.network,
                                                    reuse_existing=True)
        if self.active:
            self.restart()
        elif self._enable_dhcp():
            self.interface_name = interface_name
            self.spawn_process()

    def disable(self, retain_port=False):
        """Disable DHCP for this network by killing the local process."""
        pid = self.pid
        if self.active:
            cmd = ['kill', '-9', pid]
            utils.execute(cmd, self.root_helper)
            if not retain_port:
                self.device_delegate.destroy(self.network,
                                             self.interface_name)
        elif pid:
            # A pid file exists but the process is gone (or recycled).
            LOG.debug(_('DHCP for %(net_id)s pid %(pid)d is stale, ignoring '
                        'command'), {'net_id': self.network.id, 'pid': pid})
        else:
            LOG.debug(_('No DHCP started for %s'), self.network.id)

        self._remove_config_files()

    def _remove_config_files(self):
        """Delete this network's entire config directory."""
        confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
        conf_dir = os.path.join(confs_dir, self.network.id)
        shutil.rmtree(conf_dir, ignore_errors=True)

    def get_conf_file_name(self, kind, ensure_conf_dir=False):
        """Returns the file name for a given kind of config file."""
        confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
        conf_dir = os.path.join(confs_dir, self.network.id)
        if ensure_conf_dir:
            if not os.path.isdir(conf_dir):
                # 0o755 spelling is valid on Python 2.6+ and 3.x alike
                # (the original 0755 literal is Python-2 only).
                os.makedirs(conf_dir, 0o755)

        return os.path.join(conf_dir, kind)

    def _get_value_from_conf_file(self, kind, converter=None):
        """A helper function to read a value from one of the state files.

        Returns the (optionally converted) file contents, or None when the
        file is missing or cannot be converted.
        """
        file_name = self.get_conf_file_name(kind)
        msg = _('Error while reading %s')
        try:
            with open(file_name, 'r') as f:
                try:
                    # Bug fix: the previous `converter and converter(f.read())
                    # or f.read()` idiom re-read the already-exhausted file,
                    # so a falsy converted value (e.g. int('0')) came back
                    # as '' instead. Read once and convert explicitly.
                    data = f.read()
                    if converter is not None:
                        return converter(data)
                    return data
                except ValueError:
                    msg = _('Unable to convert value in %s')
        except IOError:
            msg = _('Unable to access %s')

        LOG.debug(msg % file_name)
        return None

    @property
    def pid(self):
        """Last known pid for the DHCP process spawned for this network."""
        return self._get_value_from_conf_file('pid', int)

    @property
    def active(self):
        """True when the recorded pid maps to a process serving this net."""
        pid = self.pid
        if pid is None:
            return False

        # Match the network id inside the process cmdline to guard against
        # pid reuse by an unrelated process.
        cmd = ['cat', '/proc/%s/cmdline' % pid]
        try:
            return self.network.id in utils.execute(cmd, self.root_helper)
        except RuntimeError:
            return False

    @property
    def interface_name(self):
        """Name of the interface the DHCP server is bound to."""
        return self._get_value_from_conf_file('interface')

    @interface_name.setter
    def interface_name(self, value):
        interface_file_path = self.get_conf_file_name('interface',
                                                      ensure_conf_dir=True)
        utils.replace_file(interface_file_path, value)

    @abc.abstractmethod
    def spawn_process(self):
        pass
class Dnsmasq(DhcpLocalProcess):
    """DHCP driver that manages one dnsmasq process per network."""

    # The ports that need to be opened when security policies are active
    # on the Quantum port used for DHCP. These are provided as a convenience
    # for users of this class.
    PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }

    # Per-subnet tag format handed to dnsmasq (tag0, tag1, ...).
    _TAG_PREFIX = 'tag%d'

    # Environment variable names passed to the lease-relay script.
    QUANTUM_NETWORK_ID_KEY = 'QUANTUM_NETWORK_ID'
    QUANTUM_RELAY_SOCKET_PATH_KEY = 'QUANTUM_RELAY_SOCKET_PATH'

    # First dnsmasq release supporting the 'set:'/'tag:' prefixes.
    MINIMUM_VERSION = 2.59

    @classmethod
    def check_version(cls):
        """Return the installed dnsmasq version as a float, warning if old."""
        ver = 0
        try:
            cmd = ['dnsmasq', '--version']
            out = utils.execute(cmd)
            # Bug fix: pattern was the non-raw "\d+.\d+" whose unescaped
            # dot matched any character; escape it so only a literal '.'
            # separates the version components.
            ver = re.findall(r"\d+\.\d+", out)[0]
            is_valid_version = float(ver) >= cls.MINIMUM_VERSION
            if not is_valid_version:
                LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
                              'DHCP AGENT MAY NOT RUN CORRECTLY! '
                              'Please ensure that its version is %s '
                              'or above!'), cls.MINIMUM_VERSION)
        except (OSError, RuntimeError, IndexError, ValueError):
            LOG.warning(_('Unable to determine dnsmasq version. '
                          'Please ensure that its version is %s '
                          'or above!'), cls.MINIMUM_VERSION)
        return float(ver)

    @classmethod
    def existing_dhcp_networks(cls, conf, root_helper):
        """Return a list of existing networks ids that we have configs for."""
        confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))

        class FakeNetwork:
            # Minimal stand-in so `active` can be probed per directory name.
            def __init__(self, net_id):
                self.id = net_id

        return [
            c for c in os.listdir(confs_dir)
            if (uuidutils.is_uuid_like(c) and
                cls(conf, FakeNetwork(c), root_helper).active)
        ]

    def spawn_process(self):
        """Spawns a Dnsmasq process for the network."""
        env = {
            self.QUANTUM_NETWORK_ID_KEY: self.network.id,
            self.QUANTUM_RELAY_SOCKET_PATH_KEY:
            self.conf.dhcp_lease_relay_socket
        }

        cmd = [
            'dnsmasq',
            '--no-hosts',
            '--no-resolv',
            '--strict-order',
            '--bind-interfaces',
            '--interface=%s' % self.interface_name,
            '--except-interface=lo',
            '--pid-file=%s' % self.get_conf_file_name(
                'pid', ensure_conf_dir=True),
            # TODO(mark): calculate value from cidr (defaults to 150)
            # '--dhcp-lease-max=%s' % ?,
            '--dhcp-hostsfile=%s' % self._output_hosts_file(),
            '--dhcp-optsfile=%s' % self._output_opts_file(),
            '--dhcp-script=%s' % self._lease_relay_script_path(),
            '--leasefile-ro',
        ]

        for i, subnet in enumerate(self.network.subnets):
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # TODO(mark): how do we indicate other options
                # ra-only, slaac, ra-nameservers, and ra-stateless.
                mode = 'static'
            # dnsmasq >= 2.59 requires the 'set:' prefix when naming tags.
            if self.version >= self.MINIMUM_VERSION:
                set_tag = 'set:'
            else:
                set_tag = ''
            cmd.append('--dhcp-range=%s%s,%s,%s,%ss' %
                       (set_tag, self._TAG_PREFIX % i,
                        netaddr.IPNetwork(subnet.cidr).network,
                        mode,
                        self.conf.dhcp_lease_time))

        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_server:
            cmd.append('--server=%s' % self.conf.dnsmasq_dns_server)
        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)

        if self.namespace:
            ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
            ip_wrapper.netns.execute(cmd, addl_env=env)
        else:
            # For normal sudo prepend the env vars before command
            cmd = ['%s=%s' % pair for pair in env.items()] + cmd
            utils.execute(cmd, self.root_helper)

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""
        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            # Typo fix in message: 'dhcpmasq' -> 'dnsmasq'.
            LOG.debug(_('Killing dnsmasq for network since all subnets have '
                        'turned off DHCP: %s'), self.network.id)
            return

        self._output_hosts_file()
        self._output_opts_file()
        if self.active:
            # SIGHUP makes dnsmasq re-read its hosts/opts files.
            cmd = ['kill', '-HUP', self.pid]
            utils.execute(cmd, self.root_helper)
        else:
            LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid)
        LOG.debug(_('Reloading allocations for network: %s'), self.network.id)

    def _output_hosts_file(self):
        """Writes a dnsmasq compatible hosts file."""
        r = re.compile('[:.]')
        buf = StringIO.StringIO()
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                # Hostname derived from the IP, e.g. 192-168-0-2.<domain>.
                name = '%s.%s' % (r.sub('-', alloc.ip_address),
                                  self.conf.dhcp_domain)
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, alloc.ip_address))

        name = self.get_conf_file_name('host')
        utils.replace_file(name, buf.getvalue())
        return name

    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""
        if self.conf.enable_isolated_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()

        options = []
        for i, subnet in enumerate(self.network.subnets):
            if not subnet.enable_dhcp:
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(i, 'dns-server',
                                        ','.join(subnet.dns_nameservers)))

            host_routes = ["%s,%s" % (hr.destination, hr.nexthop)
                           for hr in subnet.host_routes]

            # Add host routes for isolated network segments
            enable_metadata = (
                self.conf.enable_isolated_metadata
                and not subnet.gateway_ip
                and subnet.ip_version == 4)
            if enable_metadata:
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )

            if host_routes:
                options.append(
                    self._format_option(i, 'classless-static-route',
                                        ','.join(host_routes)))

            if subnet.ip_version == 4:
                if subnet.gateway_ip:
                    options.append(self._format_option(i, 'router',
                                                       subnet.gateway_ip))
                else:
                    # Empty router option tells clients there is no gateway.
                    options.append(self._format_option(i, 'router'))

        name = self.get_conf_file_name('opts')
        utils.replace_file(name, '\n'.join(options))
        return name

    def _make_subnet_interface_ip_map(self):
        """Map subnet id -> IP configured on this net's DHCP interface."""
        ip_dev = ip_lib.IPDevice(
            self.interface_name,
            self.root_helper,
            self.namespace
        )

        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )

        retval = {}
        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])
            if ip_net in subnet_lookup:
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
        return retval

    def _lease_relay_script_path(self):
        # The relay script is expected to live next to the agent binary.
        return os.path.join(os.path.dirname(sys.argv[0]),
                            'quantum-dhcp-agent-dnsmasq-lease-update')

    def _format_option(self, index, option_name, *args):
        """Format a dhcp-option line scoped to the subnet's tag."""
        if self.version >= self.MINIMUM_VERSION:
            set_tag = 'tag:'
        else:
            set_tag = ''
        return ','.join((set_tag + self._TAG_PREFIX % index,
                         'option:%s' % option_name) + args)

    @classmethod
    def lease_update(cls):
        """Entry point invoked by dnsmasq's --dhcp-script on lease events.

        Reads the event from argv/environment and forwards it as JSON over
        the agent's unix relay socket (when the socket exists).
        """
        network_id = os.environ.get(cls.QUANTUM_NETWORK_ID_KEY)
        dhcp_relay_socket = os.environ.get(cls.QUANTUM_RELAY_SOCKET_PATH_KEY)

        action = sys.argv[1]
        if action not in ('add', 'del', 'old'):
            sys.exit()

        mac_address = sys.argv[2]
        ip_address = sys.argv[3]

        if action == 'del':
            lease_remaining = 0
        else:
            lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0))

        data = dict(network_id=network_id, mac_address=mac_address,
                    ip_address=ip_address, lease_remaining=lease_remaining)

        if os.path.exists(dhcp_relay_socket):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(dhcp_relay_socket)
            sock.send(jsonutils.dumps(data))
            sock.close()
| |
"""
Tests for the Index constructor conducting inference.
"""
from decimal import Decimal
import numpy as np
import pytest
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas import (
NA,
Categorical,
CategoricalIndex,
DatetimeIndex,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
NaT,
PeriodIndex,
Series,
TimedeltaIndex,
Timestamp,
UInt64Index,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
class TestIndexConstructorInference:
    # Tests that pd.Index infers the appropriate specialized index type
    # (MultiIndex, Int64Index/UInt64Index, PeriodIndex, ...) from its input.

    @pytest.mark.parametrize("na_value", [None, np.nan])
    @pytest.mark.parametrize("vtype", [list, tuple, iter])
    def test_construction_list_tuples_nan(self, na_value, vtype):
        # GH#18505 : valid tuples containing NaN
        values = [(1, "two"), (3.0, na_value)]
        result = Index(vtype(values))
        expected = MultiIndex.from_tuples(values)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "dtype",
        [int, "int64", "int32", "int16", "int8", "uint64", "uint32", "uint16", "uint8"],
    )
    def test_constructor_int_dtype_float(self, dtype):
        # GH#18400: exact-integer floats + an int dtype produce the
        # matching signed/unsigned integer index.
        if is_unsigned_integer_dtype(dtype):
            index_type = UInt64Index
        else:
            index_type = Int64Index

        expected = index_type([0, 1, 2, 3])
        result = Index([0.0, 1.0, 2.0, 3.0], dtype=dtype)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("cast_index", [True, False])
    @pytest.mark.parametrize(
        "vals", [[True, False, True], np.array([True, False, True], dtype=bool)]
    )
    def test_constructor_dtypes_to_object(self, cast_index, vals):
        # Boolean data yields a plain object-dtype Index whether or not
        # dtype=bool is passed explicitly.
        if cast_index:
            index = Index(vals, dtype=bool)
        else:
            index = Index(vals)

        assert type(index) is Index
        assert index.dtype == object

    def test_constructor_categorical_to_object(self):
        # GH#32167 Categorical data and dtype=object should return object-dtype
        ci = CategoricalIndex(range(5))
        result = Index(ci, dtype=object)
        assert not isinstance(result, CategoricalIndex)

    def test_constructor_infer_periodindex(self):
        # A PeriodIndex input round-trips through Index() unchanged.
        xp = period_range("2012-1-1", freq="M", periods=3)
        rs = Index(xp)
        tm.assert_index_equal(rs, xp)
        assert isinstance(rs, PeriodIndex)

    def test_from_list_of_periods(self):
        # A plain list of Period scalars also infers PeriodIndex.
        rng = period_range("1/1/2000", periods=20, freq="D")
        periods = list(rng)
        result = Index(periods)
        assert isinstance(result, PeriodIndex)

    @pytest.mark.parametrize("pos", [0, 1])
    @pytest.mark.parametrize(
        "klass,dtype,ctor",
        [
            (DatetimeIndex, "datetime64[ns]", np.datetime64("nat")),
            (TimedeltaIndex, "timedelta64[ns]", np.timedelta64("nat")),
        ],
    )
    def test_constructor_infer_nat_dt_like(
        self, pos, klass, dtype, ctor, nulls_fixture, request
    ):
        # A NaT-like scalar mixed with a null should still infer the
        # datetime64/timedelta64 index (except for the cases excluded below).
        if isinstance(nulls_fixture, Decimal):
            # We dont cast these to datetime64/timedelta64
            return

        expected = klass([NaT, NaT])
        assert expected.dtype == dtype
        data = [ctor]
        data.insert(pos, nulls_fixture)

        warn = None
        if nulls_fixture is NA:
            expected = Index([NA, NaT])
            mark = pytest.mark.xfail(reason="Broken with np.NaT ctor; see GH 31884")
            request.node.add_marker(mark)
            # GH#35942 numpy will emit a DeprecationWarning within the
            # assert_index_equal calls. Since we can't do anything
            # about it until GH#31884 is fixed, we suppress that warning.
            warn = DeprecationWarning

        result = Index(data)

        with tm.assert_produces_warning(warn):
            tm.assert_index_equal(result, expected)

        result = Index(np.array(data, dtype=object))

        with tm.assert_produces_warning(warn):
            tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("swap_objs", [True, False])
    def test_constructor_mixed_nat_objs_infers_object(self, swap_objs):
        # mixed np.datetime64/timedelta64 nat results in object
        data = [np.datetime64("nat"), np.timedelta64("nat")]
        if swap_objs:
            data = data[::-1]

        expected = Index(data, dtype=object)
        tm.assert_index_equal(Index(data), expected)
        tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
class TestDtypeEnforced:
    # check we don't silently ignore the dtype keyword

    @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])
    def test_constructor_range_values_mismatched_dtype(self, dtype):
        rng_index = Index(range(5))

        coerced = Index(rng_index, dtype=dtype)
        assert coerced.dtype == dtype

        coerced = Index(range(5), dtype=dtype)
        assert coerced.dtype == dtype

    @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])
    def test_constructor_categorical_values_mismatched_non_ea_dtype(self, dtype):
        coerced = Index(Categorical([1, 2, 3]), dtype=dtype)
        assert coerced.dtype == dtype

    def test_constructor_categorical_values_mismatched_dtype(self):
        # Categorical input with a mismatched EA dtype unwraps to that dtype.
        dti = date_range("2016-01-01", periods=3)
        tm.assert_index_equal(Index(Categorical(dti), dti.dtype), dti)

        dti2 = dti.tz_localize("Asia/Tokyo")
        tm.assert_index_equal(Index(Categorical(dti2), dti2.dtype), dti2)

        ii = IntervalIndex.from_breaks(range(5))
        tm.assert_index_equal(Index(Categorical(ii), dtype=ii.dtype), ii)

    def test_constructor_ea_values_mismatched_categorical_dtype(self):
        # Naive then tz-aware datetimes, each coerced to CategoricalIndex.
        for dti in (
            date_range("2016-01-01", periods=3),
            date_range("2016-01-01", periods=3, tz="US/Pacific"),
        ):
            tm.assert_index_equal(Index(dti, dtype="category"),
                                  CategoricalIndex(dti))

    def test_constructor_period_values_mismatched_dtype(self):
        pi = period_range("2016-01-01", periods=3, freq="D")
        tm.assert_index_equal(Index(pi, dtype="category"), CategoricalIndex(pi))

    def test_constructor_timedelta64_values_mismatched_dtype(self):
        # check we don't silently ignore the dtype keyword
        tdi = timedelta_range("4 Days", periods=5)
        tm.assert_index_equal(Index(tdi, dtype="category"), CategoricalIndex(tdi))

    def test_constructor_interval_values_mismatched_dtype(self):
        breaks = date_range("2016-01-01", periods=3)
        ii = IntervalIndex.from_breaks(breaks)
        tm.assert_index_equal(Index(ii, dtype="category"), CategoricalIndex(ii))

    def test_constructor_datetime64_values_mismatched_period_dtype(self):
        dti = date_range("2016-01-01", periods=3)
        tm.assert_index_equal(Index(dti, dtype="Period[D]"), dti.to_period("D"))
class TestIndexConstructorUnwrapping:
    # Test passing different arraylike values to pd.Index

    @pytest.mark.parametrize("klass", [Index, DatetimeIndex])
    def test_constructor_from_series_dt64(self, klass):
        # A Series of Timestamps is unwrapped into a DatetimeIndex.
        stamps = [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")]
        wrapped = Series(stamps)
        result = klass(wrapped)
        tm.assert_index_equal(result, DatetimeIndex(stamps))
| |
#!/usr/bin/env python
import shelve
import sys
from QBOmy import QBO
# Running counter used to generate unique names for intermediate bags.
intermediateObjCount = 0;
# [{name:bag,selected:boolean,intermediate sql,schema}]
BagDict2 = [];
# Operand slots selected for the next set operation.
Selected = []
# Available set operations with their selection state.
OperationsList = []
# QBO facade; (re)built by initBackend() on every request.
qbo = None;
# Shelve handle used to persist session state between invocations.
d = None
def initBackend():
    """Open the session shelve and (re)build the global QBO facade.

    Loads persisted session state when the 'key' marker is present,
    otherwise seeds fresh defaults.  endBackend() must be called
    afterwards to persist and close the shelve.
    """
    # Bug fix: OperationsList was missing from the global statement, so
    # the loaded/seeded value only ever existed as a function local and
    # the module-level OperationsList stayed stale.
    global qbo, intermediateObjCount, BagDict2, d, Selected, OperationsList
    d = shelve.open("session_shelve_file")  # open, with (g)dbm filename -- no suffix
    if "key" in d:  # `in` works on shelves in both Python 2 and 3
        Selected = d["Selected"]
        intermediateObjCount = d["intermediateObjCount"]
        BagDict2 = d["BagDict2"]
        OperationsList = d["OperationsList"]
    else:
        intermediateObjCount = 0
        BagDict2 = []
        Selected = [None, None]
        OperationsList = [{"name": "union", "selected": False},
                          {"name": "except", "selected": False},
                          {"name": "intersect", "selected": False}]
    qbo = QBO({"intermediateObjCount": intermediateObjCount,
               "BagDict2": BagDict2,
               "Selected": Selected,
               "OperationsList": OperationsList})
def addBagHelper(x, cnt):
    """Insert object x as a bag, suffixing a counter until the name is free."""
    candidate = x if cnt == 0 else x + str(cnt)
    taken = any(entry["name"] == candidate for entry in qbo.BagDict2)
    if taken:
        # Name collision: retry with the next numeric suffix.
        addBagHelper(x, cnt + 1)
    else:
        qbo.insertObjectInBag(x, candidate)
def addBag(x):
    """Add object x to the workspace as a new, uniquely named bag."""
    # -- main code
    global qbo, d
    initBackend()
    addBagHelper(x, 0)
    endBackend()

def endBackend():
    """Persist the current QBO state into the shelve and close it."""
    global d;
    d["intermediateObjCount"] = qbo.intermediateObjCount
    d["BagDict2"] = qbo.BagDict2
    d["Selected"] = qbo.Selected
    d["OperationsList"] = qbo.OperationsList
    # Marker key: its presence tells initBackend() that saved state exists.
    d["key"] = True
    d.close()

def getObjects():
    """Return qbo.ObjectsList (the database objects known to QBO)."""
    global qbo, d
    initBackend()
    retval = qbo.ObjectsList
    endBackend()
    return retval

def getBags():
    """Return the current list of bag records."""
    global qbo, d
    initBackend()
    retval = qbo.BagDict2
    endBackend()
    return retval

def deleteAllBags():
    """Remove every bag from the workspace."""
    global qbo, d
    initBackend()
    qbo.BagDict2 = []
    endBackend()

def selectTable(x):
    """Mark bag x as selected via the QBO facade."""
    global qbo
    initBackend()
    qbo.selectTable(x)
    endBackend()

def unSelectTable(x):
    """Clear the selected flag on bag x via the QBO facade."""
    global qbo
    initBackend()
    qbo.unselectTable(x)
    endBackend()

def selectOperation(x):
    """Mark set operation x as selected (name echoed to stderr for debug)."""
    global qbo
    initBackend()
    sys.stderr.write(x)
    qbo.selectOperation(x)
    endBackend()

def unSelectOperation(x):
    """Clear the selected flag on set operation x."""
    global qbo
    initBackend()
    sys.stderr.write(x)
    qbo.unselectOperation(x)
    endBackend()

def deleteBag(x):
    """Delete the first bag whose name equals x."""
    global qbo
    initBackend()
    for i, obj in enumerate(qbo.BagDict2):
        if(obj["name"] == x):
            del qbo.BagDict2[i]
            break;
    endBackend()

def operateHelper(x, cnt):
    """Run qbo.operate, naming the result x plus the first free suffix."""
    try_name = x + ("" if cnt == 0 else str(cnt))
    found = False
    for val in qbo.BagDict2:
        if(val["name"] == try_name):
            found = True
            # Name already taken: recurse with the next suffix.
            operateHelper(x, cnt + 1)
            break;
    if not found:
        sys.stderr.write("hello")
        qbo.operate(try_name);

def operate():
    """Apply the selected operation, storing the result as a new bag."""
    global qbo
    initBackend()
    operateHelper('intermediateBag', 0)
    endBackend()

def renameBagHelper(x, cnt):
    """Return x, or x plus the first numeric suffix not already in use."""
    try_name = x + ('' if cnt == 0 else str(cnt))
    for obj in qbo.BagDict2:
        if(obj["name"] == try_name):
            return renameBagHelper(x, cnt + 1)
    return try_name

def renameBag(x, newname):
    """Rename bag x to newname (deduplicated against existing bag names)."""
    global qbo
    initBackend()
    for i, obj in enumerate(qbo.BagDict2):
        if(obj["name"] == x):
            qbo.renameBag(i, x, renameBagHelper(newname, 0))
    endBackend()

def viewData(x):
    """Return qbo.viewData(x) -- presumably the bag's row data; verify in QBO."""
    global qbo
    initBackend()
    retval = qbo.viewData(x)
    endBackend()
    return retval

def printForm(x):
    """Return qbo.printForm(x) -- presumably a printable view; verify in QBO."""
    global qbo
    initBackend()
    retval = qbo.printForm(x)
    endBackend()
    return retval
def finalize(table, finalSchema, initialCount, finalCount):
    """Rewrite the stored SQL of `table` per the edited schema.

    finalSchema is a sequence of (column, filter) pairs; a non-None
    filter becomes a LIKE predicate.  When no columns were removed
    (initialCount == finalCount) the projection collapses to '*'.
    """
    initBackend()
    columns = []
    predicates = []
    for col in finalSchema:
        columns.append(col[0])
        if col[1] is not None:
            sys.stderr.write("^^^" + str(col[0]) + "^^^" + str(col[1]) + "^^^")
            # NOTE(review): values are interpolated directly into the SQL
            # text -- injection-prone; should be parameterized upstream.
            predicates.append(col[0] + " like '" + col[1] + "'")
    colString = " , ".join(columns)
    # Bug fix: predicates were joined with "," which is not valid SQL in
    # a WHERE clause; join them with AND instead.
    whereString = (" where " + " and ".join(predicates)) if predicates else ""
    sys.stderr.write("Finalize whereString: " + whereString +
                     " colString: " + colString)
    if initialCount == finalCount:
        colString = "*"
    for index, bag in enumerate(qbo.BagDict2):
        if bag["name"] == table:
            oldsql = bag["sql"] + ' as ' + table
            newsql = ("select " + colString + " " +
                      oldsql[oldsql.find("from"):] + whereString)
            qbo.BagDict2[index]["sql"] = newsql
            sys.stderr.write("---- > " + str(qbo.BagDict2[index]["sql"]) +
                             "<-------")
            endBackend()
            return
    endBackend()
def getOperations():
    """Return the operations list (each entry has a name and selected flag)."""
    global qbo, d
    initBackend()
    retval = qbo.OperationsList
    endBackend()
    return retval
| |
# Copyright (c) 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to VirtualBox via VBoxManage.
"""
import os
import time
from oslo_config import cfg
from oslo_concurrency import processutils
from oslo_log import log as logging
from nova import exception
from nova.i18n import _LW
from nova import utils
from nova.virt.virtualbox import constants
from nova.virt.virtualbox import exception as vbox_exc
LOG = logging.getLogger(__name__)

# Options controlling how VBoxManage is invoked; registered below under
# the [virtualbox] group.
VIRTUAL_BOX = [
    cfg.IntOpt('retry_count',
               default=3,
               help='The number of times to retry to execute command.'),
    cfg.IntOpt('retry_interval',
               default=1,
               help='Interval between execute attempts, in seconds.'),
    cfg.StrOpt('vboxmanage_cmd',
               default="VBoxManage",
               help='Path of VBoxManage executable which is used to '
                    'comunicate with the VirtualBox.'),
]

CONF = cfg.CONF
CONF.register_opts(VIRTUAL_BOX, 'virtualbox')
class VBoxManage(object):
    """Thin classmethod-based wrapper around the VBoxManage CLI."""

    # Commands list -- VBoxManage sub-command names passed to _execute.
    CONTROL_VM = "controlvm"
    CLONE_HD = "clonehd"
    CLOSE_MEDIUM = "closemedium"
    CREATE_HD = "createhd"
    CREATE_VM = "createvm"
    LIST = "list"
    MODIFY_HD = "modifyhd"
    MODIFY_VM = "modifyvm"
    SET_PROPERTY = "setproperty"
    SHOW_VM_INFO = "showvminfo"
    SHOW_HD_INFO = "showhdinfo"
    SNAPSHOT = "snapshot"
    START_VM = "startvm"
    STORAGE_ATTACH = "storageattach"
    STORAGE_CTL = "storagectl"
    UNREGISTER_VM = "unregistervm"
    VERSION = "--version"
@classmethod
def _execute(cls, command, *args):
    """Execute the received command and returns stdout and stderr.

    Retries up to CONF.virtualbox.retry_count times when VBoxManage
    reports a transient access/state error.
    """
    LOG.debug("Execute: VBoxManage --nologo %(command)s %(args)s",
              {"command": command, "args": args})
    for _ in range(CONF.virtualbox.retry_count):
        try:
            stdout, stderr = utils.execute(
                CONF.virtualbox.vboxmanage_cmd, "--nologo",
                command.lower(), *args)
        except processutils.ProcessExecutionError as exc:
            # Failures surface via the exception's captured streams.
            stdout, stderr = exc.stdout, exc.stderr
            if (constants.VBOX_E_ACCESSDENIED in stderr or
                    constants.VBOX_E_INVALID_OBJECT_STATE in stderr):
                # Transient lock/state error: wait and retry.
                LOG.warning(_LW("Something went wrong, trying again."))
                time.sleep(CONF.virtualbox.retry_interval)
                continue
        break
    else:
        # for/else: every attempt hit the transient-error path.
        LOG.warning(_LW("Failed to process command."))
    return (stdout, stderr)
@classmethod
def _check_stderr(cls, stderr, instance=None, method=None):
    """Translate well-known VBoxManage error strings into nova exceptions."""
    # TODO(alexandrucoman): Check for another common exceptions
    if constants.VBOX_E_INSTANCE_NOT_FOUND in stderr:
        raise exception.InstanceNotFound(instance_id=instance.uuid)

    invalid_state = (constants.VBOX_E_INVALID_VM_STATE in stderr or
                     constants.VBOX_E_INVALID_VM_STATE_2 in stderr)
    if invalid_state:
        raise exception.InstanceInvalidState(
            attr=None, instance_uuid=instance.uuid,
            state=instance.power_state, method=method)
@classmethod
def _storageattach(cls, instance, controller, port, device, drive_type,
                   medium, *args):
    """Attach, modify or remove a storage medium connected to a
    storage controller.

    Extra CLI flags may be appended via *args.  Returns the raw
    (stdout, stderr) pair; raises on known error signatures.
    """
    command = [cls.STORAGE_ATTACH, instance.name,
               "--storagectl", controller,
               "--port", port,
               "--device", device,
               "--type", drive_type,
               "--medium", medium]
    if args:
        command.extend(args)

    output, error = cls._execute(*command)
    if error:
        if constants.NS_ERROR_INVALID_ARG in error:
            raise vbox_exc.VBoxInvalid(reason=error)
        cls._check_stderr(error, instance, cls.STORAGE_ATTACH)
    return (output, error)
@classmethod
def version(cls):
    """Return the VirtualBox version."""
    stdout, _ = cls._execute(cls.VERSION)
    return stdout
@classmethod
def set_property(cls, name, value):
    """Change global settings which affect the entire VirtualBox
    installation.

    For property name the following values are allowed:
        'VBOX_VRDE_EXTPACK': This specifies which library implements the
            VirtualBox Remote Desktop Extension.
        'VBOX_MACHINE_FOLDER': This specifies the default folder in
            which virtual machine definitions are kept.

    :raises vbox_exc.VBoxValueNotAllowed: name not in the allowed set.
    :raises vbox_exc.VBoxManageError: VBoxManage reported an error.
    """
    if name not in constants.ALL_VBOX_PROPERTIES:
        raise vbox_exc.VBoxValueNotAllowed(
            argument="name", value=name, method=cls.SET_PROPERTY,
            allowed_values=constants.ALL_VBOX_PROPERTIES)

    _, error = cls._execute(cls.SET_PROPERTY, name, value)
    if error:
        raise vbox_exc.VBoxManageError(method=cls.SET_PROPERTY,
                                       reason=error)
@classmethod
def control_vm(cls, instance, state):
    """Change the state of a virtual machine that is currently
    running.

    :param instance: nova.objects.instance.Instance
    :param state: one of the state from ALL_STATES container or
                  one button from ALL_ACPI_BUTTONS
    :raises vbox_exc.VBoxValueNotAllowed: state not recognized.
    :raises vbox_exc.VBoxManageError: VBoxManage reported an error.
    """
    valid_states = constants.ALL_STATES + constants.ALL_ACPI_BUTTONS
    if state not in valid_states:
        # Unknown state for VirtualBox
        raise vbox_exc.VBoxValueNotAllowed(
            argument="state", value=state, method=cls.CONTROL_VM,
            allowed_values=valid_states)

    _, error = cls._execute(cls.CONTROL_VM, instance.name, state)
    # Output containing DONE marks success despite stderr noise.
    if error and constants.DONE not in error:
        cls._check_stderr(error, instance, cls.CONTROL_VM)
        raise vbox_exc.VBoxManageError(method=cls.CONTROL_VM,
                                       reason=error)
@classmethod
def start_vm(cls, instance, method=constants.START_VM_HEADLESS):
    """Start a virtual machine that is currently in the
    "Powered off" or "Saved" states.

    For method the following values are allowed:
        :START_VM_GUI: Starts a virtual machine showing a GUI
                       window.
        :START_VM_HEADLESS: Starts a virtual machine without a
                            window for remote display only.
        :START_VM_SDL: Starts a virtual machine with a minimal
                       GUI and limited features.
    """
    if method not in constants.ALL_START_VM:
        raise vbox_exc.VBoxValueNotAllowed(
            argument="method", value=method, method=cls.START_VM,
            allowed_values=constants.ALL_START_VM)

    for _ in range(CONF.virtualbox.retry_count):
        output, error = cls._execute(cls.START_VM, instance.name, "--type",
                                     method)
        if error and constants.DONE not in error:
            if constants.VERR_INTERNAL_ERROR in error:
                # Transient internal error: wait and retry.
                LOG.warning(_LW("Something went wrong, trying again."))
                time.sleep(CONF.virtualbox.retry_interval)
                continue
            cls._check_stderr(error, instance, cls.START_VM)
            raise vbox_exc.VBoxManageError(method="startvm", reason=error)
        break
    # NOTE(review): if every retry hits VERR_INTERNAL_ERROR the loop
    # exhausts silently without raising -- confirm whether that is
    # intentional (contrast with _execute's for/else warning).
@classmethod
def modify_hd(cls, filename, field, value=None):
    """Change the characteristics of a disk image after it has
    been created.

    The following fields are available with VBoxManage modifyhd:
        :FIELD_HD_AUTORESET: determines whether the disk is
                             automatically reset on every VM startup
        :FIELD_HD_COMPACT: compact disk images, i.e. remove blocks
                           that only contains zeroes
        :FIELD_HD_RESIZE_BYTE: allows you to change the capacity of
                               an existing image
        :FIELD_HD_RESIZE_MB: allows you to change the capacity of
                             an existing image

    :raises vbox_exc.VBoxValueNotAllowed: field not recognized.
    :raises vbox_exc.VBoxManageError: VBoxManage reported an error.
    """
    if field not in constants.ALL_HD_FIELDS:
        # Bug fix: the error previously advertised ALL_START_VM (a
        # copy-paste from start_vm) as the allowed values; the valid
        # values for this method are the HD fields checked above.
        raise vbox_exc.VBoxValueNotAllowed(
            argument="field", value=field, method=cls.MODIFY_HD,
            allowed_values=constants.ALL_HD_FIELDS)

    command = [cls.MODIFY_HD, filename, field]
    if value:
        command.append(value)

    _, error = cls._execute(*command)
    # Output containing DONE marks success despite stderr noise.
    if error and constants.DONE not in error:
        raise vbox_exc.VBoxManageError(method=cls.MODIFY_HD, reason=error)
@classmethod
def modify_vm(cls, instance, field, *args):
    """Change general settings for a registered virtual machine.

    The following fields are available with VBoxManage modifyvm:
        :FIELD_OS_TYPE: This specifies what guest operating system
                        is supposed to run in the virtual machine
        :FIELD_MEMORY: This sets the amount of RAM, in MB, that
                       the virtual machine should allocate for
                       itself from the host
        :FIELD_CPUS: This sets the number of virtual CPUs for the
                     virtual machine

    .. note::
        Is required that the machine to be powered off (either
        running or in "saved" state).
    """
    if field not in constants.ALL_VM_FIELDS:
        raise vbox_exc.VBoxValueNotAllowed(
            argument="field", value=field, method=cls.MODIFY_VM,
            allowed_values=constants.ALL_VM_FIELDS)

    _, error = cls._execute(cls.MODIFY_VM, instance.name, field, *args)
    if error:
        cls._check_stderr(error, instance, cls.MODIFY_VM)
        raise vbox_exc.VBoxManageError(method=cls.MODIFY_VM, reason=error)
@classmethod
def modify_network(cls, instance, field, index, value):
    """Change the network settings for a registered virtual machine.

    :param instance: nova.objects.instance.Instance
    :param field: a template from ALL_NETWORK_FIELDS; formatted with
                  the adapter index before being passed to modifyvm.
    :param index: specifies the virtual network adapter whose
                  settings should be changed.
    :param value: the new value for the field.

    The following fields are available with VBoxManage modify_network:
        :FIELD_NIC: type of networking (nat, bridge etc)
        :FIELD_NIC_TYPE: networking hardware
        :FIELD_CABLE_CONNECTED: connect / disconnect network
        :FIELD_BRIDGE_ADAPTER: host interface used by virtual network
                               interface
        :FILED_MAC_ADDRESS: MAC address of the virtual network card
    """
    if field not in constants.ALL_NETWORK_FIELDS:
        raise vbox_exc.VBoxValueNotAllowed(
            argument="field", value=field, method="modify_network",
            allowed_values=constants.ALL_NETWORK_FIELDS)

    # The field template embeds the adapter index (e.g. --nic<N>).
    _, error = cls._execute(cls.MODIFY_VM, instance.name,
                            field % {"index": index}, value)
    if error:
        cls._check_stderr(error, instance, "modify_network")
        raise vbox_exc.VBoxManageError(method="modify_network",
                                       reason=error)
@classmethod
def modify_vrde(cls, instance, field, value):
    """Change settings regarding VRDE for a registered virtual machine.

    The following fields are available with VBoxManage modify_vrde:
        :FIELD_VRDE_EXTPACK: specifies which VRDE library will be used
        :FIELD_VRDE_MULTICON: enables multiple connections to the same
                              VRDE server
        :FIELD_VRDE_PORT: a port or a range of ports the VRDE server
                          can bind to
        :FIELD_VRDE_SERVER: enables or disables the VirtualBox remote
                            desktop extension (VRDE) server
        :FIELD_VRDE_VIDEO: enables or disables video redirection,
                           if it is supported by the VRDE server
    """
    if field not in constants.ALL_VRDE_FIELDS:
        raise vbox_exc.VBoxValueNotAllowed(
            argument="field", value=field, method="modify_vrde",
            allowed_values=constants.ALL_VRDE_FIELDS)

    _, error = cls._execute(cls.MODIFY_VM, instance.name, field, value)
    if error:
        cls._check_stderr(error, instance, "modify_vrde")
        raise vbox_exc.VBoxManageError(method="modify_vrde",
                                       reason=error)
@classmethod
def list(cls, information):
    """Return information about the host and VirtualBox's settings.

    :param information: which report VBoxManage should produce, e.g.:
        :HOST_INFO: information about the host system
        :OSTYPES_INFO: all guest operating systems presently known
                       to VirtualBox
        :VMS_INFO: all virtual machines currently registered
        :RUNNINGVMS_INFO: unique identifiers of all running machines
    :return: the raw VBoxManage output.
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    stdout, stderr = cls._execute(cls.LIST, information)
    if stderr:
        raise vbox_exc.VBoxManageError(method=cls.LIST, reason=stderr)
    return stdout
@classmethod
def show_vm_info(cls, instance):
    """Return the configuration of a particular VM as a dict.

    Runs ``showvminfo --machinereadable`` and parses each
    ``key="value"`` line; values equal to ``"none"`` are mapped
    to ``None``. Unparsable lines are logged and skipped.

    :param instance: nova.objects.instance.Instance
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    stdout, stderr = cls._execute(cls.SHOW_VM_INFO, instance.name,
                                  "--machinereadable")
    if stderr:
        cls._check_stderr(stderr, instance, cls.SHOW_VM_INFO)
        raise vbox_exc.VBoxManageError(method=cls.SHOW_VM_INFO,
                                       reason=stderr)
    information = {}
    for raw_line in stdout.splitlines():
        line = raw_line.strip()
        if not line:
            continue
        key, separator, value = line.partition("=")
        value = value.strip(' "')
        key = key.strip(' "')
        if separator != "=":
            LOG.warning("Could not parse the following line: %s", line)
            continue
        information[key] = None if value == "none" else value
    return information
@classmethod
def show_hd_info(cls, vhd):
    """Return VBoxManage output describing a virtual hard disk image.

    :param vhd: path or UUID of the disk image.
    :raises vbox_exc.VBoxInvalid: when VBoxManage rejects the argument.
    :raises vbox_exc.VBoxManageError: for any other reported error.
    """
    stdout, stderr = cls._execute(cls.SHOW_HD_INFO, vhd)
    if not stderr:
        return stdout
    if constants.NS_ERROR_INVALID_ARG in stderr:
        raise vbox_exc.VBoxInvalid(reason=stderr)
    raise vbox_exc.VBoxManageError(method=cls.SHOW_HD_INFO,
                                   reason=stderr)
@classmethod
def create_hd(cls, filename, size=None,
              disk_format=constants.DISK_FORMAT_VDI,
              variant=constants.VARIANT_STANDARD, parent=None):
    """Create a new virtual hard disk image.

    :param filename: the file name for the hard disk image
    :param size: the image capacity, in MiB units
    :param disk_format: file format for the output file
                        (default: DISK_FORMAT_VDI)
    :param variant: file format variant for the output file
                    (default: VARIANT_STANDARD)
    :param parent: base image for a differencing disk, if any
    :return: UUID of the created disk image, or None when it cannot
             be parsed from the VBoxManage output
    :raises exception.InvalidDiskFormat: for an unknown disk format.
    :raises vbox_exc.VBoxValueNotAllowed: for an unknown variant.
    :raises exception.InvalidDiskInfo: for a non-positive size.
    :raises exception.DestinationDiskExists: when the file exists.
    :raises vbox_exc.VBoxManageError: for any other reported error.
    """
    if disk_format not in constants.ALL_DISK_FORMATS:
        raise exception.InvalidDiskFormat(disk_format=disk_format)
    if variant not in constants.ALL_VARIANTS:
        raise vbox_exc.VBoxValueNotAllowed(
            argument="variant", value=variant, method=cls.CREATE_HD,
            allowed_values=constants.ALL_VARIANTS)
    if size and size < 1:
        raise exception.InvalidDiskInfo(
            reason="Disk size should be bigger than 0.")
    command = [cls.CREATE_HD,
               "--filename", filename,
               "--format", disk_format,
               "--variant", variant]
    if size:
        command.extend(["--size", size])
    if parent:
        command.extend(["--diffparent", parent])
    stdout, stderr = cls._execute(*command)
    if stderr and constants.DONE not in stderr:
        if constants.VBOX_E_FILE_ERROR in stderr:
            raise exception.DestinationDiskExists(path=filename)
        raise vbox_exc.VBoxManageError(method=cls.CREATE_HD,
                                       reason=stderr)
    # The output should look like:
    # Disk image created. UUID: 6917a94b-ecb0-4996-8ab8-5e4ef8f9539a
    for line in stdout.splitlines():
        if "UUID:" in line:
            return line.split("UUID:")[1].strip()
    # TODO(alexandrucoman): Fail to get UUID (Something went wrong)
    return None
@classmethod
def clone_hd(cls, vhd_path, new_vdh_path, disk_format=None, variant=None,
             existing=False):
    """Duplicate a registered virtual hard disk image.

    The copy gets a new unique identifier.

    :param vhd_path: path for the input virtual hard drive
    :param new_vdh_path: path for the output virtual hard drive
    :param disk_format: file format for the output file
    :param variant: file format variant for the output file
    :param existing: clone onto an already existing destination medium
    :raises exception.DestinationDiskExists: on a file-level error.
    :raises vbox_exc.VBoxManageError: for any other reported error.
    """
    command = [cls.CLONE_HD, vhd_path, new_vdh_path]
    for value, option in ((disk_format, "--format"),
                          (variant, "--variant")):
        if value:
            command.extend([option, value])
    if existing:
        command.append("--existing")
    _, stderr = cls._execute(*command)
    if not stderr or constants.DONE in stderr:
        return
    if constants.VBOX_E_FILE_ERROR in stderr:
        LOG.debug("Fail to clone hd: %(error)s", {"error": stderr})
        raise exception.DestinationDiskExists(path=new_vdh_path)
    raise vbox_exc.VBoxManageError(method=cls.CLONE_HD, reason=stderr)
@classmethod
def create_vm(cls, name, basefolder=None, register=False):
    """Create a new XML virtual machine definition file.

    :param name: the name of the virtual machine
    :param basefolder: the path for the virtual machine
    :param register: also register the machine definition with
                     VirtualBox
    :type register: bool
    :return: UUID of the new machine, or None when it cannot be
             parsed from the VBoxManage output
    :raises exception.DestinationDiskExists: when the target path
        already exists.
    :raises vbox_exc.VBoxManageError: for any other reported error.

    .. note::
        If basefolder is provided, the machine folder is created
        under it and keeps its name even if the virtual machine is
        later renamed.
    """
    command = [cls.CREATE_VM, "--name", name]
    if basefolder:
        command.extend(["--basefolder", basefolder])
    if register:
        command.extend(["--register"])
    stdout, stderr = cls._execute(*command)
    if stderr:
        if constants.VBOX_E_FILE_ERROR in stderr:
            path = os.path.join(basefolder, name) if basefolder else name
            raise exception.DestinationDiskExists(path=path)
        raise vbox_exc.VBoxManageError(method=cls.CREATE_VM,
                                       reason=stderr)
    for line in stdout.splitlines():
        if "UUID:" in line:
            return line.split("UUID:")[1].strip()
    # TODO(alexandrucoman): Fail to get UUID (Something went wrong)
    return None
@classmethod
def storage_ctl(cls, instance, name, system_bus, controller):
    """Attach or modify a storage controller.

    :param instance: nova.objects.instance.Instance
    :param name: name of the storage controller.
    :param system_bus: type of the system bus to which the storage
                       controller must be connected.
    :param controller: type of chipset being emulated for the given
                       storage controller.
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    _, stderr = cls._execute(cls.STORAGE_CTL, instance.name,
                             "--name", name,
                             "--add", system_bus,
                             "--controller", controller)
    if not stderr:
        return
    # TODO(alexandrucoman): Check for specific error code
    # like constants.NS_ERROR_INVALID_ARG
    raise vbox_exc.VBoxManageError(method=cls.STORAGE_CTL,
                                   reason=stderr)
@classmethod
def storage_attach(cls, instance, controller, port, device, drive_type,
                   medium):
    """Attach, modify or remove a medium on a storage controller.

    :param instance: nova.objects.instance.Instance
    :param controller: name of the storage controller.
    :param port: the number of the storage controller's port
                 which is to be modified.
    :param device: the number of the port's device which is to
                   be modified.
    :param drive_type: the type of the drive to which the medium
                       is being attached; one of
                       constants.ALL_STORAGES.
    :param medium: what is to be attached.
    :raises vbox_exc.VBoxValueNotAllowed: for an unknown drive type.
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    allowed = constants.ALL_STORAGES
    if drive_type not in allowed:
        raise vbox_exc.VBoxValueNotAllowed(
            argument="drive_type", value=drive_type,
            method="storage_attach",
            allowed_values=allowed)
    _, stderr = cls._storageattach(instance, controller, port, device,
                                   drive_type, medium)
    if stderr:
        raise vbox_exc.VBoxManageError(method="storageattach",
                                       reason=stderr)
@classmethod
def scsi_storage_attach(cls, instance, controller, port, device,
                        connection_info, initiator):
    """Attach a storage medium using ISCSI.

    :param instance: nova.objects.instance.Instance
    :param controller: name of the storage controller.
    :param port: the number of the storage controller's port
                 which is to be modified.
    :param device: the number of the port's device which is to
                   be modified.
    :param connection_info: information regarding the iSCSI portal and
                            volume
    :param initiator: iSCSI initiator name passed through to
                      VBoxManage.
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    data = connection_info['data']
    auth_username = data.get('auth_username')
    auth_password = data.get('auth_password')
    # The portal may be given as "host:port" or just "host"; fall back
    # to the default portal port when none is specified.
    try:
        portal_ip, portal_port = data['target_portal'].split(':')
    except ValueError:
        portal_ip = data['target_portal']
        portal_port = constants.DEFAULT_PORTAL_PORT
    information = [constants.FIELD_PORTAL, portal_ip,
                   constants.FIELD_PORTAL_PORT, portal_port,
                   constants.FIELD_LUN, data['target_lun'],
                   constants.FIELD_TARGET, data['target_iqn'],
                   constants.FIELD_INITIATOR, initiator]
    # Credentials are only forwarded when both parts are present.
    if auth_password and auth_username:
        information.extend([constants.FIELD_USERNAME, auth_username,
                            constants.FIELD_PASSWORD, auth_password])
    _, error = cls._storageattach(instance, controller, port, device,
                                  constants.STORAGE_HDD,
                                  constants.MEDIUM_ISCSI,
                                  *information)
    if error:
        raise vbox_exc.VBoxManageError(method="storageattach",
                                       reason=error)
@classmethod
def unregister_vm(cls, instance, delete=True):
    """Unregister a virtual machine.

    :param instance: nova.objects.instance.Instance
    :param delete: also remove the machine's files: hard disk images
        (including differencing files) not shared with other machines,
        saved state files, the machine XML file and its backups, the
        log files, and the machine directory when it ends up empty.
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    command = [cls.UNREGISTER_VM, instance.name]
    if delete:
        command.append('--delete')
    _, stderr = cls._execute(*command)
    if not stderr or constants.DONE in stderr:
        return
    cls._check_stderr(stderr, instance, cls.UNREGISTER_VM)
    raise vbox_exc.VBoxManageError(method=cls.UNREGISTER_VM,
                                   reason=stderr)
@classmethod
def take_snapshot(cls, instance, name, description=None, live=None):
    """Take a snapshot of the current state of the virtual machine.

    :param instance: nova.objects.instance.Instance
    :param name: (str) snapshot name
    :param description: (str) snapshot description
    :param live: (bool) when set, the VM is not stopped while the
        snapshot is created (live snapshotting).
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    command = [cls.SNAPSHOT, instance.name, 'take', name]
    if description:
        command += ['--description', description]
    if live:
        command += ['--live']
    _, stderr = cls._execute(*command)
    if stderr and constants.DONE not in stderr:
        raise vbox_exc.VBoxManageError(method="snapshot", reason=stderr)
@classmethod
def delete_snapshot(cls, instance, name):
    """Delete a snapshot, specified by name or by UUID.

    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.

    .. note::
        This can take a while to finish, since the differencing
        images associated with the snapshot might need to be merged
        with their child differencing images.
    """
    _, stderr = cls._execute(cls.SNAPSHOT, instance.name, 'delete', name)
    if stderr and constants.DONE not in stderr:
        raise vbox_exc.VBoxManageError(method="snapshot", reason=stderr)
@classmethod
def set_vhd_uuid(cls, disk):
    """Assign a fresh UUID to the given image file.

    This allows multiple copies of a container to be registered.

    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    _, stderr = cls._execute('internalcommands', 'sethduuid', disk)
    if stderr:
        raise vbox_exc.VBoxManageError(method="sethduuid", reason=stderr)
@classmethod
def set_disk_parent_uuid(cls, disk_file, parent_uuid):
    """Assign a new parent UUID to the given image file.

    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    _, stderr = cls._execute('internalcommands', 'sethdparentuuid',
                             disk_file, parent_uuid)
    if stderr:
        raise vbox_exc.VBoxManageError(method="sethdparentuuid",
                                       reason=stderr)
@classmethod
def close_medium(cls, medium, path, delete=False):
    """Remove a medium from a VirtualBox media registry.

    :param medium: the medium type to close.
    :param path: path or UUID of the medium.
    :param delete: also delete the backing image file.
    :raises vbox_exc.VBoxManageError: if VBoxManage reports an error.
    """
    command = [cls.CLOSE_MEDIUM, medium, path]
    if delete:
        command.append("--delete")
    _, stderr = cls._execute(*command)
    if stderr and constants.DONE not in stderr:
        raise vbox_exc.VBoxManageError(method=cls.CLOSE_MEDIUM,
                                       reason=stderr)
| |
"""
weasyprint.tests.test_presentational_hints
------------------------------------------
Test the HTML presentational hints.
"""
from weasyprint import CSS, HTML
from .testing_utils import BASE_URL, assert_no_logs
# Shared stylesheet for all tests: a fixed 1000x1000 page and a
# zero-margin body, so asserted positions and sizes are predictable.
PH_TESTING_CSS = CSS(string='''
@page {margin: 0; size: 1000px 1000px}
body {margin: 0}
''')
@assert_no_logs
def test_no_ph():
    """Presentational hints are ignored unless explicitly enabled."""
    # Test both CSS and non-CSS rules
    document = HTML(string='''
<hr size=100 />
<table align=right width=100><td>0</td></table>
''').render(stylesheets=[PH_TESTING_CSS])
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    hr, table = body.children
    # render() was called without presentational_hints=True, so the
    # size/align/width attributes must have no styling effect.
    assert hr.border_height() != 100
    assert table.position_x == 0
@assert_no_logs
def test_ph_page():
    """Body margin/color attributes map onto CSS properties."""
    document = HTML(string='''
<body marginheight=2 topmargin=3 leftmargin=5
bgcolor=red text=blue />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    # marginheight=2 sets both vertical margins (and, as asserted here,
    # wins over topmargin=3 for the top edge); leftmargin affects only
    # the left edge.
    assert body.margin_top == 2
    assert body.margin_bottom == 2
    assert body.margin_left == 5
    assert body.margin_right == 0
    # bgcolor/text map to background-color and color (RGBA tuples).
    assert body.style['background_color'] == (1, 0, 0, 1)
    assert body.style['color'] == (0, 0, 1, 1)
@assert_no_logs
def test_ph_flow():
    """Flow-content hints map onto white-space and text-align."""
    document = HTML(string='''
<pre wrap></pre>
<center></center>
<div align=center></div>
<div align=middle></div>
<div align=left></div>
<div align=right></div>
<div align=justify></div>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    pre, center, div1, div2, div3, div4, div5 = body.children
    # <pre wrap> turns into pre-wrap whitespace handling.
    assert pre.style['white_space'] == 'pre-wrap'
    # <center> and align=... map onto text-align; "middle" behaves
    # like "center".
    expectations = ((center, 'center'), (div1, 'center'),
                    (div2, 'center'), (div3, 'left'),
                    (div4, 'right'), (div5, 'justify'))
    for box, alignment in expectations:
        assert box.style['text_align_all'] == alignment
@assert_no_logs
def test_ph_phrasing():
    """Phrasing-level hints: <br clear> and legacy <font> attributes."""
    document = HTML(string='''
<style>@font-face {
src: url(weasyprint.otf); font-family: weasyprint
}</style>
<br clear=left>
<br clear=right />
<br clear=both />
<br clear=all />
<font color=red face=weasyprint size=7></font>
<Font size=4></Font>
<font size=+5 ></font>
<font size=-5 ></font>
''', base_url=BASE_URL).render(
        stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    line1, line2, line3, line4, line5 = body.children
    br1, = line1.children
    br2, = line2.children
    br3, = line3.children
    br4, = line4.children
    font1, font2, font3, font4 = line5.children
    # clear=... maps to the CSS clear property; "all" becomes "both".
    assert br1.style['clear'] == 'left'
    assert br2.style['clear'] == 'right'
    assert br3.style['clear'] == 'both'
    assert br4.style['clear'] == 'both'
    # color/face map to color and font-family.
    assert font1.style['color'] == (1, 0, 0, 1)
    assert font1.style['font_family'] == ('weasyprint',)
    # Legacy 1-7 (and relative +N/-N) size keywords map to concrete
    # font sizes; values beyond the ends are clamped, so size=7 and
    # size=+5 assert the same size.
    assert font1.style['font_size'] == 1.5 * 2 * 16
    assert font2.style['font_size'] == 6 / 5 * 16
    assert font3.style['font_size'] == 1.5 * 2 * 16
    assert font4.style['font_size'] == 8 / 9 * 16
@assert_no_logs
def test_ph_lists():
    """The li type attribute maps onto list-style-type."""
    document = HTML(string='''
<ol>
<li type=A></li>
<li type=1></li>
<li type=a></li>
<li type=i></li>
<li type=I></li>
</ol>
<ul>
<li type=circle></li>
<li type=disc></li>
<li type=square></li>
</ul>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    ol, ul = body.children
    oli1, oli2, oli3, oli4, oli5 = ol.children
    uli1, uli2, uli3 = ul.children
    # Ordered items: A/1/a/i/I map to the CSS counter styles.
    ordered = zip((oli1, oli2, oli3, oli4, oli5),
                  ('upper-alpha', 'decimal', 'lower-alpha',
                   'lower-roman', 'upper-roman'))
    for item, marker in ordered:
        assert item.style['list_style_type'] == marker
    # Unordered items: the keyword is used directly.
    for item, marker in zip((uli1, uli2, uli3),
                            ('circle', 'disc', 'square')):
        assert item.style['list_style_type'] == marker
@assert_no_logs
def test_ph_lists_types():
    """The ol/ul type attribute maps onto list-style-type."""
    document = HTML(string='''
<ol type=A></ol>
<ol type=1></ol>
<ol type=a></ol>
<ol type=i></ol>
<ol type=I></ol>
<ul type=circle></ul>
<ul type=disc></ul>
<ul type=square></ul>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    ol1, ol2, ol3, ol4, ol5, ul1, ul2, ul3 = body.children
    markers = ('upper-alpha', 'decimal', 'lower-alpha', 'lower-roman',
               'upper-roman', 'circle', 'disc', 'square')
    for box, marker in zip((ol1, ol2, ol3, ol4, ol5, ul1, ul2, ul3),
                           markers):
        assert box.style['list_style_type'] == marker
@assert_no_logs
def test_ph_tables():
    """Table hints: align, rules, border, cellspacing, bordercolor,
    valign, nowrap and cell-content alignment."""
    document = HTML(string='''
<table align=left rules=none></table>
<table align=right rules=groups></table>
<table align=center rules=rows></table>
<table border=10 cellspacing=3 bordercolor=green>
<thead>
<tr>
<th valign=top></th>
</tr>
</thead>
<tr>
<td nowrap><h1 align=right></h1><p align=center></p></td>
</tr>
<tr>
</tr>
<tfoot align=justify>
<tr>
<td></td>
</tr>
</tfoot>
</table>
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    # Tables generate anonymous wrapper boxes; align applies to the
    # wrapper (float for left/right, auto margins for center).
    wrapper1, wrapper2, wrapper3, wrapper4, = body.children
    assert wrapper1.style['float'] == 'left'
    assert wrapper2.style['float'] == 'right'
    assert wrapper3.style['margin_left'] == 'auto'
    assert wrapper3.style['margin_right'] == 'auto'
    # Any rules=... value here hides the outer border and collapses
    # the border model.
    assert wrapper1.children[0].style['border_left_style'] == 'hidden'
    assert wrapper1.style['border_collapse'] == 'collapse'
    assert wrapper2.children[0].style['border_left_style'] == 'hidden'
    assert wrapper2.style['border_collapse'] == 'collapse'
    assert wrapper3.children[0].style['border_left_style'] == 'hidden'
    assert wrapper3.style['border_collapse'] == 'collapse'
    table4, = wrapper4.children
    # border=10 becomes a 10px outset border; cellspacing maps to
    # border-spacing.
    assert table4.style['border_top_style'] == 'outset'
    assert table4.style['border_top_width'] == 10
    assert table4.style['border_spacing'] == (3, 3)
    # bordercolor=green: only check the green channel dominates.
    r, g, b, a = table4.style['border_left_color']
    assert g > r and g > b
    head_group, rows_group, foot_group = table4.children
    head, = head_group.children
    th, = head.children
    assert th.style['vertical_align'] == 'top'
    line1, line2 = rows_group.children
    td, = line1.children
    # nowrap maps to white-space: nowrap; border=... also gives each
    # cell a 1px inset border.
    assert td.style['white_space'] == 'nowrap'
    assert td.style['border_top_width'] == 1
    assert td.style['border_top_style'] == 'inset'
    h1, p = td.children
    assert h1.style['text_align_all'] == 'right'
    assert p.style['text_align_all'] == 'center'
    foot, = foot_group.children
    tr, = foot.children
    # align on a row group is inherited by its rows.
    assert tr.style['text_align_all'] == 'justify'
@assert_no_logs
def test_ph_hr():
    """hr hints: align, color, noshade, size and width."""
    document = HTML(string='''
<hr align=left>
<hr align=right />
<hr align=both color=red />
<hr align=center noshade size=10 />
<hr align=all size=8 width=100 />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    hr1, hr2, hr3, hr4, hr5 = body.children
    # align maps onto auto margins; unrecognised values ("both",
    # "all") behave like center, as asserted below.
    assert hr1.margin_left == 0
    assert hr1.style['margin_right'] == 'auto'
    assert hr2.style['margin_left'] == 'auto'
    assert hr2.margin_right == 0
    assert hr3.style['margin_left'] == 'auto'
    assert hr3.style['margin_right'] == 'auto'
    assert hr3.style['color'] == (1, 0, 0, 1)
    assert hr4.style['margin_left'] == 'auto'
    assert hr4.style['margin_right'] == 'auto'
    # With noshade, size=10 is rendered entirely as border
    # (5px top + 5px bottom).
    assert hr4.border_height() == 10
    assert hr4.style['border_top_width'] == 5
    # Without noshade, the rule keeps 1px borders and the rest of
    # size=8 becomes content height; width maps to CSS width.
    assert hr5.border_height() == 8
    assert hr5.height == 6
    assert hr5.width == 100
    assert hr5.style['border_top_width'] == 1
@assert_no_logs
def test_ph_embedded():
    """Replaced-element hints: align, hspace/vspace, width/height."""
    document = HTML(string='''
<object data="data:image/svg+xml,<svg></svg>"
align=top hspace=10 vspace=20></object>
<img src="data:image/svg+xml,<svg></svg>" alt=text
align=right width=10 height=20 />
<embed src="data:image/svg+xml,<svg></svg>" align=texttop />
''').render(stylesheets=[PH_TESTING_CSS], presentational_hints=True)
    page, = document.pages
    html, = page._page_box.children
    body, = html.children
    line, = body.children
    # The floated <img> leaves text boxes around it on the line.
    object_, text1, img, embed, text2 = line.children
    # align=texttop / align=top map onto vertical-align keywords.
    assert embed.style['vertical_align'] == 'text-top'
    assert object_.style['vertical_align'] == 'top'
    # vspace/hspace map onto vertical/horizontal margins.
    assert object_.margin_top == 20
    assert object_.margin_left == 10
    # align=right floats the image; width/height map to CSS sizes.
    assert img.style['float'] == 'right'
    assert img.width == 10
    assert img.height == 20
| |
import logging
import urwid
from itertools import groupby
from mpd import CommandError as MpdCommandError
import suggestive.signals as signals
import suggestive.mstat as mstat
import suggestive.widget as widget
from suggestive.mvc.base import View, Model, Controller
from suggestive.buffer import Buffer
logger = logging.getLogger('suggestive.scrobbles')
logger.addHandler(logging.NullHandler())
######################################################################
# Models
######################################################################
class DayModel(Model):
    """Read-only model wrapping a single calendar date.

    Used for the date header rows in the scrobble list.
    """

    def __init__(self, date):
        super(DayModel, self).__init__()
        self._day = date

    @property
    def date(self):
        """The date this model represents."""
        return self._day
class PlayModel(Model):
    """Model wrapping a locally played database track."""

    def __init__(self, db_track):
        super(PlayModel, self).__init__()
        self._track = db_track

    @property
    def db_track(self):
        return self._track

    @property
    def db_artist(self):
        return self._track.artist

    @property
    def db_album(self):
        return self._track.album

    @property
    def loved(self):
        # Truthy only when Last.FM info exists and marks the track as
        # loved; may also be the info object's falsy value itself.
        lastfm = self._track.lastfm_info
        return lastfm and lastfm.loved
class ScrobbleModel(Model):
    """Model wrapping one Last.FM scrobble database record."""

    def __init__(self, db_scrobble):
        super(ScrobbleModel, self).__init__()
        self._record = db_scrobble

    @property
    def db_scrobble(self):
        return self._record

    @db_scrobble.setter
    def db_scrobble(self, scrobble):
        # Replacing the record notifies observers of the change.
        self._record = scrobble
        self.update()

    @property
    def date(self):
        """Calendar date on which the scrobble happened."""
        return self.db_scrobble.time.date()

    @property
    def db_track(self):
        return self._record.track

    @property
    def db_artist(self):
        return self.db_track.artist

    @property
    def db_album(self):
        return self.db_track.album

    @property
    def loved(self):
        # Truthy only when Last.FM info exists and marks the track as
        # loved; may also be the info object's falsy value itself.
        lastfm = self.db_track.lastfm_info
        return lastfm and lastfm.loved
class ScrobbleListModel(Model):
    """Aggregate model holding Last.FM scrobbles plus local plays."""

    def __init__(self):
        super(ScrobbleListModel, self).__init__()
        self._scrobbles = []  # Last.FM scrobbles
        self._plays = []  # Local plays that may not have been scrobbled

    def __repr__(self):
        return '<ScrobbleListModel>'

    @property
    def scrobbles(self):
        return self._scrobbles

    @scrobbles.setter
    def scrobbles(self, value):
        # Assigning a new list notifies observers of the change.
        self._scrobbles = value
        self.update()

    @property
    def plays(self):
        return self._plays

    @plays.setter
    def plays(self, value):
        # Assigning a new list notifies observers of the change.
        self._plays = value
        self.update()
######################################################################
# Controller
######################################################################
class ScrobbleListController(Controller):
    """Controller mediating between MPD / the scrobble DB and the model."""

    def __init__(self, model, conf, loop):
        super(ScrobbleListController, self).__init__(model, conf, loop)
        # MPD song id of the most recently observed playing track, used
        # to avoid recording the same play more than once.
        self.current_song_id = None

    def load_more_scrobbles(self, position):
        """Fetch enough additional scrobbles to cover `position`."""
        n_items = len(self.model.scrobbles)
        n_load = 1 + position - n_items
        # TODO: Convert using just conf
        scrobbles = mstat.get_scrobbles(self.conf, n_load, n_items)
        models = [ScrobbleModel(scrobble) for scrobble in scrobbles]
        if models:
            # Appending through the property setter triggers an update.
            self.model.scrobbles += models

    def reload(self):
        """Re-fetch the list of scrobbles from the database"""
        logger.debug('Reload scrobbles')
        scrobbles = mstat.get_scrobbles(
            self.conf, len(self.model.scrobbles), 0)
        models = [ScrobbleModel(scrobble) for scrobble in scrobbles]
        self.model.scrobbles = models

    def insert_new_song_played(self):
        """Record the currently playing MPD song as a local play.

        Does nothing when the playing song has not changed since the
        last call, or when MPD cannot report the song.
        """
        mpd = mstat.initialize_mpd(self.conf)
        status = mpd.status()
        songid = status.get('songid')
        if songid != self.current_song_id:
            try:
                info = mpd.playlistid(songid)[0]
                db_track = mstat.database_track_from_mpd(
                    self.conf,
                    info)
                play_model = PlayModel(db_track)
                # Newest play goes to the top of the plays list.
                self.model.plays.insert(0, play_model)
                self.model.update()
                logger.debug('Plays: {}'.format(self.model.plays))
                self.current_song_id = songid
            except (MpdCommandError, IndexError):
                # Song disappeared from the playlist or MPD rejected the
                # request; skip recording this play.
                pass
######################################################################
# Views
######################################################################
class DayView(urwid.WidgetWrap, View):
    """Widget displaying a single date header line."""

    def __init__(self, model):
        View.__init__(self, model)
        label = urwid.Text(self.text)
        decorated = urwid.AttrMap(label, 'scrobble date')
        urwid.WidgetWrap.__init__(self, decorated)

    @property
    def text(self):
        """Date formatted as YYYY-MM-DD."""
        return self.model.date.strftime('%Y-%m-%d')
class ScrobbleView(urwid.WidgetWrap, View, widget.Searchable):
    """Selectable row displaying one scrobbled or played track."""

    __metaclass__ = urwid.signals.MetaSignals
    signals = []

    TRACK_FORMAT = '{artist} - {album} - {title}{suffix}'
    STYLES = ('scrobble', 'focus scrobble')

    def __init__(self, model, controller):
        View.__init__(self, model)
        self._controller = controller
        self._icon = urwid.SelectableIcon(self.text)
        view = urwid.AttrMap(self._icon, *self.STYLES)
        urwid.WidgetWrap.__init__(self, view)

    def _format(self, suffix):
        # Render the 'artist - album - title' line with an optional suffix.
        model = self.model
        return self.TRACK_FORMAT.format(
            artist=model.db_artist.name,
            album=model.db_album.name,
            title=model.db_track.name,
            suffix=suffix)

    @property
    def text(self):
        # ' [L]' marks tracks the user has "loved" on Last.FM.
        return self._format(' [L]' if self.model.loved else '')

    @property
    def canonical_text(self):
        # Same line without the loved marker, used for searching.
        # BUG FIX: previously formatted title=model.name, an attribute
        # the scrobble/play models do not define; the title is the
        # track name, consistent with `text` above.
        return self._format('')

    @property
    def searchable_text(self):
        return self.canonical_text
class ScrobbleListWalker(urwid.ListWalker):
    """urwid ListWalker over day headers, local plays and scrobbles.

    Views are built lazily: when focus moves past the end of the
    currently generated views, more scrobbles are requested through
    the controller.
    """

    def __init__(self, model, controller, conf):
        # I think plays are local plays, not scrobbles
        self._model = model
        self._controller = controller
        self._conf = conf
        self.focus = 0
        self.views = self._generate_plays()
        # TODO: Hook in conf.initial_scrobbles()

    @property
    def controller(self):
        return self._controller

    @property
    def model(self):
        return self._model

    def __iter__(self):
        return iter(self.views)

    def size(self):
        # NOTE(review): counts only scrobbles, while self.views also
        # holds play and header views -- confirm this asymmetry with
        # __len__ is intentional.
        return len(self.model.scrobbles)

    def __len__(self):
        return len(self.views)

    def _generate_plays(self):
        # Build a 'Plays' header plus one view per locally played
        # track; empty list when nothing has been played yet.
        if not self.model.plays:
            return []
        plays = [ScrobbleView(model, self.controller)
                 for model in self.model.plays]
        header = urwid.AttrMap(urwid.Text('Plays'), 'scrobble date')
        return [header] + plays

    def _generate_views(self, models):
        # Group scrobbles by date, yielding a DayView header before
        # the first scrobble of each new day.
        last_date = None
        # last_date = next(
        #     (v.model.date for v in self.views if isinstance(v, DayView)),
        #     None)
        for date, group in groupby(models, lambda model: model.date):
            group = list(group)
            if date != last_date:
                last_date = date
                yield DayView(DayModel(date))
            for model in group:
                yield ScrobbleView(model, self.controller)

    def _load_more(self, position):
        # Ask the controller for enough scrobbles to reach `position`.
        self.controller.load_more_scrobbles(position)

    def update_views(self):
        """Rebuild the full list of views from the current model state."""
        views = self._generate_plays()
        views.extend(self._generate_views(self.model.scrobbles))
        self.views = views

    # ListWalker Overrides

    def __getitem__(self, idx):
        # NOTE(review): returns a placeholder icon showing the index
        # instead of self.views[idx] -- looks like debugging leftover;
        # confirm whether urwid actually exercises this path.
        return urwid.SelectableIcon(str(idx))

    def get_focus(self):
        return self._get(self.focus)

    def set_focus(self, focus):
        self.focus = focus
        self._modified()

    def get_next(self, current):
        return self._get(current + 1)

    def get_prev(self, current):
        return self._get(current - 1)

    def _get(self, pos):
        # Return (widget, position), loading more scrobbles on demand;
        # (None, None) tells urwid the list has ended.
        if pos < 0:
            return None, None
        if pos >= len(self.views):
            logger.debug('Position {} >= {}'.format(pos, len(self.views)))
            self._load_more(pos)
        if pos >= len(self.views):
            return None, None
        return self.views[pos], pos
class ScrobbleListView(widget.SuggestiveListBox, View):
    """List box view over the scrobble/play history."""

    __metaclass__ = urwid.signals.MetaSignals
    signals = [signals.NEXT_TRACK, signals.PREVIOUS_TRACK]

    def __init__(self, model, controller, conf):
        View.__init__(self, model)
        self._controller = controller
        self._conf = conf
        # TODO: parameters
        widget.SuggestiveListBox.__init__(self, self.create_walker())

    def create_walker(self):
        """Build the walker backing this list box."""
        return ScrobbleListWalker(self.model, self._controller,
                                  self._conf)

    def update(self):
        # Model changed: rebuild the walker's views.
        self.body.update_views()
class ScrobbleBuffer(Buffer):
    """Buffer wiring together the scrobble model, controller and view."""

    def __init__(self, conf, loop):
        self.conf = conf
        self.model = ScrobbleListModel()
        self.controller = ScrobbleListController(self.model, conf, loop)
        self.view = ScrobbleListView(self.model, self.controller, conf)
        super(ScrobbleBuffer, self).__init__(self.view)
        self.update_status('Scrobbles')
        # Pre-populate the list with the configured number of scrobbles.
        self.controller.load_more_scrobbles(conf.scrobbles.initial_load)

    @property
    def body(self):
        # The widget currently installed as this buffer's body.
        return self.contents['body'][0]

    def update(self, *args):
        """Record the currently playing song and refresh the display."""
        self.controller.insert_new_song_played()
        # We may have dirtied up the list walker, so force a redraw by
        # invalidating the cached canvas for the body, then setting focus on it
        self.body._invalidate()
        self.set_focus('body')

    def reload(self, *args):
        """Re-fetch scrobbles from the database."""
        self.controller.reload()

    def search(self, searcher):
        # Searching is currently disabled for this buffer.
        # self.scrobble_list.search(searcher)
        pass

    def next_search(self):
        # Searching is currently disabled for this buffer.
        # self.scrobble_list.next_search_item()
        pass
| |
# Copyright (c) 2015 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import mock
import webob.exc
from oslo_utils import uuidutils
from neutron.api import extensions as api_extensions
from neutron.db import servicetype_db as sdb
from neutron.tests.unit.db import test_db_base_plugin_v2
from networking_bgpvpn.neutron.db import bgpvpn_db
from networking_bgpvpn.neutron import extensions
from networking_bgpvpn.neutron.services.common import constants
from networking_bgpvpn.neutron.services import plugin
from networking_bgpvpn.neutron.services.service_drivers import driver_api
_uuid = uuidutils.generate_uuid
class BgpvpnTestCaseMixin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    """Base test case that wires the BGPVPN service plugin into Neutron.

    Registers a (possibly driver-specific) service provider, loads the
    BGPVPN API extension, and provides helper context managers for
    creating BGPVPNs and network associations through the API.
    """

    def setUp(self, service_provider=None):
        # Build the "type:name:driver:default" provider string; default
        # to the in-tree reference driver when none is given.
        if not service_provider:
            provider = (constants.BGPVPN +
                        ':dummy:networking_bgpvpn.neutron.services.'
                        'service_drivers.driver_api.BGPVPNDriver:default')
        else:
            provider = (constants.BGPVPN + ':test:' + service_provider +
                        ':default')
        bits = provider.split(':')
        provider = {
            'service_type': bits[0],
            'name': bits[1],
            'driver': bits[2]
        }
        if len(bits) == 4:
            provider['default'] = True
        # override the default service provider
        self.service_providers = (
            mock.patch.object(sdb.ServiceTypeManager,
                              'get_service_providers').start())
        self.service_providers.return_value = [provider]
        bgpvpn_plugin_str = ('networking_bgpvpn.neutron.services.plugin.'
                             'BGPVPNPlugin')
        service_plugins = {'bgpvpn_plugin': bgpvpn_plugin_str}
        self.bgpvpn_plugin = plugin.BGPVPNPlugin()
        extensions_path = ':'.join(extensions.__path__)
        ext_mgr = api_extensions.PluginAwareExtensionManager(
            extensions_path,
            {constants.BGPVPN: self.bgpvpn_plugin})
        super(BgpvpnTestCaseMixin, self).setUp(
            service_plugins=service_plugins,
            ext_mgr=ext_mgr)
        # Request body used by the tests, plus the variant the API is
        # expected to return after filling in the defaulted fields.
        self.bgpvpn_data = {'bgpvpn': {'name': 'bgpvpn1',
                                       'type': 'l3',
                                       'route_targets': ['1234:56'],
                                       'auto_aggregate': False,
                                       'tenant_id': self._tenant_id}}
        self.converted_data = copy.copy(self.bgpvpn_data)
        self.converted_data['bgpvpn'].update({'export_targets': [],
                                              'import_targets': [],
                                              'route_distinguishers': []})

    @contextlib.contextmanager
    def bgpvpn(self, do_delete=True, **kwargs):
        """Create a BGPVPN through the API, yield it, optionally delete.

        :param do_delete: delete the BGPVPN when the context exits.
        :raises webob.exc.HTTPClientError: for any error API response.
        """
        fmt = 'json'
        tenant_id = kwargs.get('tenant_id') if 'tenant_id' in kwargs\
            else self._tenant_id
        if(kwargs.get('data')):
            bgpvpn_data = kwargs.get('data')
        else:
            bgpvpn_data = {'bgpvpn': {'name': 'bgpvpn1',
                                      'type': 'l3',
                                      'route_targets': ['1234:56'],
                                      'auto_aggregate': False,
                                      'tenant_id': tenant_id}}
        bgpvpn_req = self.new_create_request(
            'bgpvpn/bgpvpns', bgpvpn_data, fmt=fmt)
        res = bgpvpn_req.get_response(self.ext_api)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        bgpvpn = self.deserialize('json', res)
        yield bgpvpn
        if do_delete:
            self._delete('bgpvpn/bgpvpns',
                         bgpvpn['bgpvpn']['id'])

    @contextlib.contextmanager
    def assoc_net(self, bgpvpn_id, net_id, do_disassociate=True):
        """Associate a network with a BGPVPN, yield, then disassociate.

        :param do_disassociate: undo the association on context exit.
        :raises webob.exc.HTTPClientError: for any error API response.
        """
        data = {'network_id': net_id}
        assoc_req = self._req('PUT', 'bgpvpn/bgpvpns',
                              data=data, fmt=self.fmt, id=bgpvpn_id,
                              action='associate_network')
        res = assoc_req.get_response(self.ext_api)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        yield
        if do_disassociate:
            disassoc_req = self._req('PUT', 'bgpvpn/bgpvpns',
                                     data=data, fmt=self.fmt,
                                     id=bgpvpn_id,
                                     action='disassociate_network')
            res = disassoc_req.get_response(self.ext_api)
            if res.status_int >= 400:
                raise webob.exc.HTTPClientError(code=res.status_int)
class TestBGPVPNServicePlugin(BgpvpnTestCaseMixin):
    """API-level tests for the BGPVPN service plugin validation paths."""

    def setUp(self):
        super(TestBGPVPNServicePlugin, self).setUp()

    def _put_action(self, bgpvpn_id, action, data):
        # Helper: issue a PUT <action> on the given BGPVPN and return
        # the raw webob response.
        request = self._req('PUT', 'bgpvpn/bgpvpns', data=data,
                            fmt=self.fmt, id=bgpvpn_id, action=action)
        return request.get_response(self.ext_api)

    @mock.patch.object(plugin.BGPVPNPlugin, '_validate_network_body')
    def test_associate_network(self, mock_validate):
        # Associating a network must run body validation exactly once.
        with self.network() as net:
            net_id = net['network']['id']
            with self.bgpvpn() as bgpvpn:
                bgpvpn_id = bgpvpn['bgpvpn']['id']
                mock_validate.return_value = net['network']
                with self.assoc_net(bgpvpn_id, net_id):
                    expected = {'network_id': net_id}
                    mock_validate.assert_called_once_with(
                        mock.ANY, bgpvpn_id, expected)

    @mock.patch.object(plugin.BGPVPNPlugin, '_validate_network_body')
    def test_disassociate_network(self, mock_validate):
        # Disassociation must re-validate the network body exactly once.
        with self.network() as net:
            net_id = net['network']['id']
            with self.bgpvpn() as bgpvpn:
                bgpvpn_id = bgpvpn['bgpvpn']['id']
                mock_validate.return_value = net['network']
                with self.assoc_net(bgpvpn_id, net_id,
                                    do_disassociate=False):
                    # Forget the call made during association, so the
                    # assertion below only sees the disassociate call.
                    mock_validate.reset_mock()
                    expected = {'network_id': net_id}
                    response = self._put_action(bgpvpn_id,
                                                'disassociate_network',
                                                expected)
                    if response.status_int >= 400:
                        raise webob.exc.HTTPClientError(
                            code=response.status_int)
                    mock_validate.assert_called_once_with(
                        mock.ANY, bgpvpn_id, expected)

    def test_associate_empty_network(self):
        with self.bgpvpn() as bgpvpn:
            res = self._put_action(bgpvpn['bgpvpn']['id'],
                                   'associate_network', {})
            self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)

    def test_associate_unknown_network(self):
        with self.bgpvpn() as bgpvpn:
            res = self._put_action(bgpvpn['bgpvpn']['id'],
                                   'associate_network',
                                   {'network_id': 'unknown_uuid'})
            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)

    def test_associate_unauthorized_net(self):
        # A tenant must not associate another tenant's network.
        with self.network() as net:
            net_id = net['network']['id']
            with self.bgpvpn(tenant_id='another_tenant') as bgpvpn:
                res = self._put_action(bgpvpn['bgpvpn']['id'],
                                       'associate_network',
                                       {'network_id': net_id})
                self.assertEqual(res.status_int,
                                 webob.exc.HTTPForbidden.code)
class TestBGPVPNServiceDriverDB(BgpvpnTestCaseMixin):
    """Verify plugin operations hit the DB layer and then invoke the
    driver's *_postcommit hooks with the expected arguments."""

    def setUp(self):
        super(TestBGPVPNServiceDriverDB, self).setUp()

    # NOTE: with stacked mock.patch.object decorators, the bottom-most
    # decorator supplies the first mock argument.
    @mock.patch.object(driver_api.BGPVPNDriver,
                       'create_bgpvpn_postcommit')
    @mock.patch.object(bgpvpn_db.BGPVPNPluginDb, 'create_bgpvpn')
    def test_create_bgpvpn(self, mock_create_db, mock_create_postcommit):
        # DB layer mocked to return the converted (fully-populated)
        # payload so the postcommit hook sees the same dict.
        mock_create_db.return_value = self.converted_data
        with self.bgpvpn(do_delete=False):
            mock_create_db.assert_called_once_with(mock.ANY,
                                                   self.converted_data)
            mock_create_postcommit.assert_called_once_with(mock.ANY,
                                                           self.converted_data)

    @mock.patch.object(driver_api.BGPVPNDriver,
                       'delete_bgpvpn_postcommit')
    def test_delete_bgpvpn(self, mock_delete_postcommit):
        with self.bgpvpn(do_delete=False) as bgpvpn:
            # Patch the DB delete only around the first request so the
            # mocked call can be asserted; the patch is stopped before
            # the second, real delete that actually cleans up.
            patcher = mock.patch.object(bgpvpn_db.BGPVPNPluginDb,
                                        'delete_bgpvpn',
                                        return_value=self.converted_data)
            mock_delete_db = patcher.start()
            self._delete('bgpvpn/bgpvpns',
                         bgpvpn['bgpvpn']['id'])
            mock_delete_db.assert_called_once_with(mock.ANY,
                                                   bgpvpn['bgpvpn']['id'])
            mock_delete_postcommit.assert_called_once_with(mock.ANY,
                                                           self.converted_data)
            patcher.stop()
            # Real delete (DB no longer mocked) to remove the resource.
            self._delete('bgpvpn/bgpvpns',
                         bgpvpn['bgpvpn']['id'])

    @mock.patch.object(bgpvpn_db.BGPVPNPluginDb, 'get_bgpvpn')
    def test_get_bgpvpn(self, mock_get_db):
        with self.bgpvpn() as bgpvpn:
            self._show('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id'])
            mock_get_db.assert_called_once_with(mock.ANY,
                                                bgpvpn['bgpvpn']['id'],
                                                mock.ANY)

    def test_get_bgpvpn_with_net(self):
        # An associated network must show up in the bgpvpn's
        # 'networks' list.
        with self.network() as net:
            net_id = net['network']['id']
            with self.bgpvpn() as bgpvpn:
                with self.assoc_net(bgpvpn['bgpvpn']['id'], net_id=net_id):
                    res = self._show('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id'])
                    self.assertIn('networks', res['bgpvpn'])
                    self.assertEqual(net_id,
                                     res['bgpvpn']['networks'][0])

    @mock.patch.object(driver_api.BGPVPNDriver,
                       'update_bgpvpn_postcommit')
    @mock.patch.object(bgpvpn_db.BGPVPNPluginDb,
                       'update_bgpvpn')
    def test_update_bgpvpn(self, mock_update_db, mock_update_postcommit):
        with self.bgpvpn() as bgpvpn:
            new_data = {"bgpvpn": {"name": "foo"}}
            # Build the old/new dicts the postcommit hook should receive.
            old_bgpvpn = copy.copy(self.bgpvpn_data['bgpvpn'])
            old_bgpvpn['id'] = bgpvpn['bgpvpn']['id']
            old_bgpvpn['networks'] = []
            new_bgpvpn = copy.copy(old_bgpvpn)
            new_bgpvpn['name'] = 'foo'
            mock_update_db.return_value = new_bgpvpn
            self._update('bgpvpn/bgpvpns',
                         bgpvpn['bgpvpn']['id'],
                         new_data)
            mock_update_db.assert_called_once_with(
                mock.ANY, bgpvpn['bgpvpn']['id'], new_data)
            mock_update_postcommit.assert_called_once_with(
                mock.ANY, old_bgpvpn, new_bgpvpn)

    @mock.patch.object(driver_api.BGPVPNDriver,
                       'associate_network_postcommit')
    @mock.patch.object(bgpvpn_db.BGPVPNPluginDb,
                       'associate_network')
    def test_associate_network(self, mock_assoc_db, mock_assoc_postcommit):
        with self.network() as net:
            net_id = net['network']['id']
            with self.bgpvpn() as bgpvpn:
                id = bgpvpn['bgpvpn']['id']
                with self.assoc_net(id, net_id=net_id):
                    mock_assoc_db.assert_called_once_with(mock.ANY,
                                                          id,
                                                          net_id)
                    mock_assoc_postcommit.assert_called_once_with(mock.ANY,
                                                                  id,
                                                                  net_id)

    @mock.patch.object(driver_api.BGPVPNDriver,
                       'disassociate_network_postcommit')
    @mock.patch.object(bgpvpn_db.BGPVPNPluginDb,
                       'disassociate_network')
    def test_disassociate_network(self, mock_disassoc_db,
                                  mock_disassoc_postcommit):
        with self.network() as net:
            net_id = net['network']['id']
            with self.bgpvpn() as bgpvpn:
                id = bgpvpn['bgpvpn']['id']
                data = {'network_id': net_id}
                # The DB layer is mocked, so issue the disassociate
                # request directly rather than via assoc_net().
                disassoc_req = self._req('PUT', 'bgpvpn/bgpvpns',
                                         data=data, fmt=self.fmt, id=id,
                                         action='disassociate_network')
                res = disassoc_req.get_response(self.ext_api)
                if res.status_int >= 400:
                    raise webob.exc.HTTPClientError(code=res.status_int)
                mock_disassoc_db.assert_called_once_with(mock.ANY,
                                                         id,
                                                         net_id)
                mock_disassoc_postcommit.assert_called_once_with(mock.ANY,
                                                                 id,
                                                                 net_id)
| |
import logging
from typing import Callable, Iterable, List, Optional, Type, Union
from ray.rllib.agents.trainer import Trainer, COMMON_CONFIG
from ray.rllib.env.env_context import EnvContext
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.policy import Policy
from ray.rllib.utils import add_mixins
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import Deprecated
from ray.rllib.utils.typing import (
EnvCreator,
EnvType,
PartialTrainerConfigDict,
ResultDict,
TrainerConfigDict,
)
from ray.tune.logger import Logger
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
@Deprecated(
    new="Sub-class from Trainer (or another Trainer sub-class) directly! "
    "See e.g. ray.rllib.agents.dqn.dqn.py for an example.",
    error=False,
)
def build_trainer(
    name: str,
    *,
    default_config: Optional[TrainerConfigDict] = None,
    validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,
    default_policy: Optional[Type[Policy]] = None,
    get_policy_class: Optional[
        Callable[[TrainerConfigDict], Optional[Type[Policy]]]
    ] = None,
    validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,
    before_init: Optional[Callable[[Trainer], None]] = None,
    after_init: Optional[Callable[[Trainer], None]] = None,
    before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,
    mixins: Optional[List[type]] = None,
    execution_plan: Optional[
        Union[
            Callable[[WorkerSet, TrainerConfigDict], Iterable[ResultDict]],
            Callable[[Trainer, WorkerSet, TrainerConfigDict], Iterable[ResultDict]],
        ]
    ] = None,
    allow_unknown_configs: bool = False,
    allow_unknown_subkeys: Optional[List[str]] = None,
    override_all_subkeys_if_type_changes: Optional[List[str]] = None,
) -> Type[Trainer]:
    """Helper function for defining a custom Trainer class.

    Functions will be run in this order to initialize the trainer:
        1. Config setup: validate_config, get_policy.
        2. Worker setup: before_init, execution_plan.
        3. Post setup: after_init.

    Args:
        name: name of the trainer (e.g., "PPO")
        default_config: The default config dict of the algorithm,
            otherwise uses the Trainer default config.
        validate_config: Optional callable that takes the config to check
            for correctness. It may mutate the config as needed.
        default_policy: The default Policy class to use if `get_policy_class`
            returns None.
        get_policy_class: Optional callable that takes a config and returns
            the policy class or None. If None is returned, will use
            `default_policy` (which must be provided then).
        validate_env: Optional callable to validate the generated environment
            (only on worker=0).
        before_init: Optional callable to run before anything is constructed
            inside Trainer (Workers with Policies, execution plan, etc..).
            Takes the Trainer instance as argument.
        after_init: Optional callable to run at the end of trainer init
            (after all Workers and the exec. plan have been constructed).
            Takes the Trainer instance as argument.
        before_evaluate_fn: Callback to run before evaluation. This takes
            the trainer instance as argument.
        mixins: List of any class mixins for the returned trainer class.
            These mixins will be applied in order and will have higher
            precedence than the Trainer class.
        execution_plan: Optional callable that sets up the
            distributed execution workflow.
        allow_unknown_configs: Whether to allow unknown top-level config keys.
        allow_unknown_subkeys: List of top-level keys
            with value=dict, for which new sub-keys are allowed to be added to
            the value dict. Appends to Trainer class defaults.
        override_all_subkeys_if_type_changes: List of top level keys with
            value=dict, for which we always override the entire value (dict),
            iff the "type" key in that value dict changes. Appends to Trainer
            class defaults.

    Returns:
        A Trainer sub-class configured by the specified args.
    """
    # Snapshot all builder arguments for `with_updates()`. NOTE: this
    # relies on `locals()` being captured before any other local name is
    # bound, so it must remain the first statement of the function body.
    original_kwargs = locals().copy()
    base = add_mixins(Trainer, mixins)

    class trainer_cls(base):
        # Static trainer metadata captured from the builder arguments.
        _name = name
        _default_config = default_config or COMMON_CONFIG
        _policy_class = default_policy

        def __init__(
            self,
            config: TrainerConfigDict = None,
            env: Union[str, EnvType, None] = None,
            logger_creator: Callable[[], Logger] = None,
            remote_checkpoint_dir: Optional[str] = None,
            sync_function_tpl: Optional[str] = None,
        ):
            # Delegate straight to Trainer (not `base`), mirroring the
            # original behavior even when mixins are present.
            Trainer.__init__(
                self,
                config,
                env,
                logger_creator,
                remote_checkpoint_dir,
                sync_function_tpl,
            )

        @override(base)
        def setup(self, config: PartialTrainerConfigDict):
            # Extend (never replace) the Trainer class defaults for the
            # unknown-key allowances before running the standard setup.
            if allow_unknown_subkeys is not None:
                self._allow_unknown_subkeys += allow_unknown_subkeys
            self._allow_unknown_configs = allow_unknown_configs
            if override_all_subkeys_if_type_changes is not None:
                self._override_all_subkeys_if_type_changes += (
                    override_all_subkeys_if_type_changes
                )
            Trainer.setup(self, config)

        def _init(self, config: TrainerConfigDict, env_creator: EnvCreator):
            # No `get_policy_class` function.
            if get_policy_class is None:
                # Default_policy must be provided (unless in multi-agent mode,
                # where each policy can have its own default policy class).
                if not config["multiagent"]["policies"]:
                    assert default_policy is not None
            # Query the function for a class to use.
            else:
                self._policy_class = get_policy_class(config)
                # If None returned, use default policy (must be provided).
                if self._policy_class is None:
                    assert default_policy is not None
                    self._policy_class = default_policy
            if before_init:
                before_init(self)
            # Creating all workers (excluding evaluation workers).
            self.workers = WorkerSet(
                env_creator=env_creator,
                validate_env=validate_env,
                policy_class=self._policy_class,
                trainer_config=config,
                num_workers=self.config["num_workers"],
            )
            self.train_exec_impl = self.execution_plan(
                self.workers, config, **self._kwargs_for_execution_plan()
            )
            if after_init:
                after_init(self)

        @override(Trainer)
        def validate_config(self, config: PartialTrainerConfigDict):
            # Call super's validation method.
            Trainer.validate_config(self, config)
            # Then call user defined one, if any. (Inside this method body,
            # `validate_config` resolves to the closure variable from
            # build_trainer, not to this method -- class scopes do not
            # participate in closures.)
            if validate_config is not None:
                validate_config(config)

        @staticmethod
        @override(Trainer)
        def execution_plan(workers, config, **kwargs):
            # `execution_plan` is provided, use it inside
            # `self.execution_plan()`.
            if execution_plan is not None:
                return execution_plan(workers, config, **kwargs)
            # If `execution_plan` is not provided (None), the Trainer will use
            # it's already existing default `execution_plan()` static method
            # instead.
            else:
                return Trainer.execution_plan(workers, config, **kwargs)

        @override(Trainer)
        def _before_evaluate(self):
            if before_evaluate_fn:
                before_evaluate_fn(self)

        @staticmethod
        @override(Trainer)
        def with_updates(**overrides) -> Type[Trainer]:
            """Build a copy of this trainer class with the specified overrides.

            Keyword Args:
                overrides (dict): use this to override any of the arguments
                    originally passed to build_trainer() for this policy.

            Returns:
                Type[Trainer]: A the Trainer sub-class using `original_kwargs`
                and `overrides`.

            Examples:
                >>> from ray.rllib.agents.ppo import PPOTrainer
                >>> MyPPOClass = PPOTrainer.with_updates(name="MyPPO")
                >>> issubclass(MyPPOClass, PPOTrainer)
                False
                >>> issubclass(MyPPOClass, Trainer)
                True
                >>> trainer = MyPPOClass()
                >>> print(trainer)
                MyPPO
            """
            return build_trainer(**dict(original_kwargs, **overrides))

        def __repr__(self):
            return self._name

    # Rename the generated class so repr/qualname reflect the trainer name.
    trainer_cls.__name__ = name
    trainer_cls.__qualname__ = name
    return trainer_cls
| |
import unittest
import os
import asynctest
import asyncio
from timeseries import TimeSeries
import numpy as np
from scipy.stats import norm
from tsdb import *
import time
import subprocess
import signal
########################################
#
# we use unit tests instead of pytests, because they facilitate the build-up
# and tear-down of the server (and avoid the tests hanging)
#
# adapted from go_server.py and go_client.py
# subprocess reference: https://docs.python.org/2/library/subprocess.html
#
# note: server code run through the subprocess is not reflected in coverage
#
########################################
class test_client(asynctest.TestCase):
    """End-to-end tests of the TSDB client against a persistent database
    server running in a subprocess.

    unittest-style setUp/tearDown guarantees the server process is torn
    down even when a test fails, so a misbehaving server cannot hang the
    test run. The main test inserts data, kills and restarts the server,
    and verifies that the persisted state survives the restart.
    """

    # database initializations
    def setUp(self):
        """Start a fresh server subprocess over an emptied data directory."""
        # persistent database parameters
        self.data_dir = 'db_files'
        self.db_name = 'default'
        self.ts_length = 100

        # clear file system for testing
        dir_clean = self.data_dir + '/' + self.db_name + '/'
        if not os.path.exists(dir_clean):
            os.makedirs(dir_clean)
        filelist = [dir_clean + f for f in os.listdir(dir_clean)]
        for f in filelist:
            os.remove(f)

        # initialize & run the server; sleep to give it time to come up
        self.server = subprocess.Popen(
            ['python', 'go_server_persistent.py',
             '--ts_length', str(self.ts_length),
             '--data_dir', self.data_dir, '--db_name', self.db_name])
        time.sleep(5)

        # initialize database client
        self.client = TSDBClient()

        # parameters for testing
        self.num_ts = 25
        self.num_vps = 5

    # avoids the server hanging
    def tearDown(self):
        """Interrupt the server subprocess and drop the client."""
        # SIGINT lets the server shut down cleanly and persist its state
        os.kill(self.server.pid, signal.SIGINT)
        time.sleep(5)
        self.client = None

    def tsmaker(self, m, s, j):
        '''
        Helper function: randomly generates a time series for testing.

        Parameters
        ----------
        m : float
            Mean value for generating time series data
        s : float
            Standard deviation value for generating time series data
        j : float
            Quantifies the "jitter" to add to the time series data

        Returns
        -------
        A time series and associated meta data.
        '''
        # generate metadata
        meta = {}
        meta['order'] = int(np.random.choice(
            [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]))
        meta['blarg'] = int(np.random.choice([1, 2]))

        # generate time series data (100 samples, matching ts_length)
        t = np.arange(0.0, 1.0, 0.01)
        v = norm.pdf(t, m, s) + j * np.random.randn(self.ts_length)

        # return time series and metadata
        return meta, TimeSeries(t, v)

    # run client tests
    async def test_client_ops(self):
        """Exercise insert/select/similarity ops, then restart the server
        and verify all data and derived fields persisted."""
        ########################################
        #
        # create dummy data for testing
        #
        ########################################

        # a manageable number of test time series
        mus = np.random.uniform(low=0.0, high=1.0, size=self.num_ts)
        sigs = np.random.uniform(low=0.05, high=0.4, size=self.num_ts)
        jits = np.random.uniform(low=0.05, high=0.2, size=self.num_ts)

        # initialize dictionaries for time series and their metadata
        tsdict = {}
        metadict = {}

        # fill dictionaries with randomly generated entries for database
        for i, m, s, j in zip(range(self.num_ts), mus, sigs, jits):
            meta, tsrs = self.tsmaker(m, s, j)  # generate data
            pk = "ts-{}".format(i)  # generate primary key
            tsdict[pk] = tsrs  # store time series data
            metadict[pk] = meta  # store metadata

        # for testing later on
        ts_keys = sorted(tsdict.keys())

        # randomly choose time series as vantage points
        vpkeys = sorted(list(np.random.choice(
            ts_keys, size=self.num_vps, replace=False)))
        # expected names of the derived vantage-point distance fields
        vpdist = ['d_vp_' + i for i in vpkeys]

        #######################################
        #
        # add stats trigger
        #
        #######################################

        status, payload = await self.client.add_trigger(
            'stats', 'insert_ts', ['mean', 'std'], None)
        assert status == TSDBStatus.OK
        assert payload is None

        ########################################
        #
        # insert time series
        #
        ########################################

        for k in tsdict:
            status, payload = await self.client.insert_ts(k, tsdict[k])
            assert status == TSDBStatus.OK
            assert payload is None

        ########################################
        #
        # upsert metadata
        #
        ########################################

        for k in tsdict:
            status, payload = await self.client.upsert_meta(k, metadict[k])
            assert status == TSDBStatus.OK
            assert payload is None

        ########################################
        #
        # add vantage points
        #
        ########################################

        for i in range(self.num_vps):
            status, payload = await self.client.insert_vp(vpkeys[i])
            assert status == TSDBStatus.OK
            assert payload is None

        ########################################
        #
        # test that all data matches
        #
        ########################################

        # note: recast time series objects as shouldn't really be
        # communicating directly with client!
        for k in tsdict:
            # time series data
            status, payload = await self.client.select({'pk': k}, ['ts'], None)
            assert status == TSDBStatus.OK
            assert TimeSeries(*payload[k]['ts']) == tsdict[k]
            # all other metadata
            status, payload = await self.client.select({'pk': k}, [])
            for field in metadict[k]:
                assert metadict[k][field] == payload[k][field]

        ########################################
        #
        # test that vantage points match
        #
        ########################################

        status, payload = await self.client.select(
            {'vp': True}, None, {'sort_by': '+pk'})
        assert status == TSDBStatus.OK
        assert list(payload.keys()) == vpkeys

        ########################################
        #
        # test that vantage point distance fields have been created
        #
        ########################################

        use_keys = vpdist[:]  # avoid namespace issues
        status, payload = await self.client.select(
            {'vp': True}, use_keys, {'sort_by': '+pk', 'limit': 1})
        assert status == TSDBStatus.OK
        assert sorted(list(list(payload.values())[0].keys())) == vpdist

        ########################################
        #
        # store similarity search results
        #
        ########################################

        # randomly generate query time series
        _, query = self.tsmaker(np.random.uniform(low=0.0, high=1.0),
                                np.random.uniform(low=0.05, high=0.4),
                                np.random.uniform(low=0.05, high=0.2))

        # vantage point similarity
        status, payload = await self.client.vp_similarity_search(query, 1)
        assert status == TSDBStatus.OK
        assert len(payload) == 1
        similarity_vp = payload.copy()

        # isax similarity (may legitimately find no match)
        status, payload = await self.client.isax_similarity_search(query)
        assert ((status == TSDBStatus.OK and len(payload) == 1) or
                (status == TSDBStatus.NO_MATCH and payload is None))
        if payload is None:
            similarity_isax = None
        else:
            similarity_isax = payload.copy()

        ########################################
        #
        # close database/server
        #
        ########################################

        os.kill(self.server.pid, signal.SIGINT)
        time.sleep(5)
        self.client = None
        time.sleep(5)

        ########################################
        #
        # reload database/server
        #
        ########################################

        # initialize & run the server
        self.server = subprocess.Popen(
            ['python', 'go_server_persistent.py',
             '--ts_length', str(self.ts_length),
             '--data_dir', self.data_dir, '--db_name', self.db_name])
        time.sleep(5)

        # initialize database client
        self.client = TSDBClient()

        ########################################
        #
        # test that all data matches after the restart
        #
        ########################################

        # note: recast time series objects as shouldn't really be
        # communicating directly with client!
        for k in tsdict:
            # time series
            status, payload = await self.client.select({'pk': k}, ['ts'], None)
            assert status == TSDBStatus.OK
            assert TimeSeries(*payload[k]['ts']) == tsdict[k]
            # all other metadata
            status, payload = await self.client.select({'pk': k}, [])
            for field in metadict[k]:
                assert metadict[k][field] == payload[k][field]

        ########################################
        #
        # test that vantage points match
        #
        ########################################

        status, payload = await self.client.select(
            {'vp': True}, None, {'sort_by': '+pk'})
        assert status == TSDBStatus.OK
        assert list(payload.keys()) == vpkeys

        ########################################
        #
        # test that vantage point distance fields have been created
        #
        ########################################

        use_keys = vpdist[:]  # avoid namespace issues
        status, payload = await self.client.select(
            {'vp': True}, use_keys, {'sort_by': '+pk', 'limit': 1})
        assert status == TSDBStatus.OK
        assert sorted(list(list(payload.values())[0].keys())) == vpdist

        ########################################
        #
        # similarity search results must match the pre-restart ones
        #
        ########################################

        # vantage point similarity
        status, payload = await self.client.vp_similarity_search(query, 1)
        assert status == TSDBStatus.OK
        assert len(payload) == 1
        assert list(payload)[0] == list(similarity_vp)[0]

        # isax similarity
        status, payload = await self.client.isax_similarity_search(query)
        assert ((status == TSDBStatus.OK and len(payload) == 1) or
                (status == TSDBStatus.NO_MATCH and payload is None))
        if status == TSDBStatus.NO_MATCH:
            assert payload == similarity_isax
        else:
            assert list(payload)[0] == list(similarity_isax)[0]

        ########################################
        #
        # isax tree
        #
        ########################################

        status, payload = await self.client.isax_tree()
        assert isinstance(payload, str)
        assert len(payload) > 0
        # BUGFIX: was `payload[:6] != 'ERROR'`, which compares a
        # six-character slice to a five-character string and therefore
        # could never detect an error payload.
        assert not payload.startswith('ERROR')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
from math import floor, sqrt
import numpy as np
from random import shuffle
from matplotlib import pyplot as plt
from scipy import ndimage
import sys, os, re, string
import pickle
from deltaRCM_tools import save_figure, random_pick, random_pick_list
class Tools(object):
_input_vars = {
'model_output__site_prefix': {'name':'site_prefix', 'type': 'string', 'default': ''},
'model_output__case_prefix': {'name':'case_prefix', 'type': 'string', 'default': ''},
'model_output__out_dir': {'name':'out_dir', 'type': 'string', 'default': 'deltaRCM_Output/'},
'model__total_timesteps': {'name':'n_steps', 'type': 'long', 'default': 200},
'model_grid__length': {'name':'Length', 'type': 'float', 'default': 200.},
'model_grid__width': {'name':'Width', 'type': 'float', 'default': 500.},
'model_grid__cell_size': {'name':'dx', 'type': 'float', 'default': 10.},
'land_surface__width': {'name':'L0_meters', 'type': 'float', 'default': 30.},
'land_surface__slope': {'name':'S0', 'type': 'float', 'default': 0.00015},
'model__max_iteration': {'name':'itermax', 'type': 'long', 'default': 3},
'water__number_parcels': {'name':'Np_water', 'type': 'long', 'default': 200},
'channel__flow_velocity': {'name':'u0', 'type': 'float', 'default': 1.},
'channel__width': {'name':'N0_meters', 'type': 'float', 'default': 50.},
'channel__flow_depth': {'name':'h0', 'type': 'float', 'default': 5.},
'sea_water_surface__elevation': {'name':'H_SL', 'type': 'float', 'default': 0.},
'sea_water_surface__rate_change_elevation': {'name':'SLR', 'type': 'float', 'default': 0.},
'sediment__number_parcels': {'name':'Np_sed', 'type': 'long', 'default': 500},
'sediment__bedload_fraction': {'name':'f_bedload', 'type': 'float', 'default': 0.25},
'sediment__influx_concentration': {'name':'C0_percent', 'type': 'float', 'default': 0.1},
'model_output__opt_eta_figs': {'name':'save_eta_figs', 'type': 'choice', 'default': False},
'model_output__opt_stage_figs': {'name':'save_stage_figs', 'type': 'choice', 'default': False},
'model_output__opt_depth_figs': {'name':'save_depth_figs', 'type': 'choice', 'default': False},
'model_output__opt_eta_grids': {'name':'save_eta_grids', 'type': 'choice', 'default': False},
'model_output__opt_stage_grids': {'name':'save_stage_grids', 'type': 'choice', 'default': False},
'model_output__opt_depth_grids': {'name':'save_depth_grids', 'type': 'choice', 'default': False},
'model_output__opt_time_interval': {'name':'save_dt', 'type': 'long', 'default': 10},
'coeff__surface_smoothing': {'name': 'Csmooth', 'type': 'float', 'default': 0.9},
'coeff__under_relaxation__water_surface': {'name': 'omega_sfc', 'type': 'float', 'default': 0.1},
'coeff__under_relaxation__water_flow': {'name': 'omega_flow', 'type': 'float', 'default': 0.9},
'coeff__iterations_smoothing_algorithm': {'name': 'Nsmooth', 'type': 'long', 'default': 5},
'coeff__depth_dependence__water': {'name': 'theta_water', 'type': 'float', 'default': 1.0},
'coeff__depth_dependence__sand': {'name': 'coeff_theta_sand', 'type': 'float', 'default': 2.0},
'coeff__depth_dependence__mud': {'name': 'coeff_theta_mud', 'type': 'float', 'default': 1.0},
'coeff__non_linear_exp_sed_flux_flow_velocity': {'name': 'beta', 'type': 'long', 'default': 3},
'coeff__sedimentation_lag': {'name': 'sed_lag', 'type': 'float', 'default': 1.0},
'coeff__velocity_deposition_mud': {'name': 'coeff_U_dep_mud', 'type': 'float', 'default': 0.3},
'coeff__velocity_erosion_mud': {'name': 'coeff_U_ero_mud', 'type': 'float', 'default': 1.5},
'coeff__velocity_erosion_sand': {'name': 'coeff_U_ero_sand', 'type': 'float', 'default': 1.05},
'coeff__topographic_diffusion': {'name': 'alpha', 'type': 'float', 'default': 0.1}
}
def flatten_indices(self, ind):
'''Flatten indices'''
return ind[0]*self.W + ind[1]
def output_data(self, timestep):
if int(timestep+1) % self.save_dt == 0:
if self.save_eta_figs:
plt.pcolor(self.eta)
plt.colorbar()
save_figure(self.prefix + "eta" + str(timestep+1))
if self.save_stage_figs:
plt.pcolor(self.stage)
plt.colorbar()
save_figure(self.prefix + "stage" + str(timestep+1))
if self.save_depth_figs:
plt.pcolor(self.depth)
plt.colorbar()
save_figure(self.prefix + "depth" + str(timestep+1))
#############################################
############### weight arrays ###############
#############################################
def build_weight_array(self, array, fix_edges = False, normalize = False):
'''
Create np.array((8,L,W)) of quantity a in each of the neighbors to a cell
'''
self.array = array
a_shape = array.shape
self.fix_edges = fix_edges
self.normalize = normalize
wgt_array = np.zeros((8,a_shape[0],a_shape[1]))
nums = range(8)
wgt_array[nums[0],:,:-1] = self.array[:,1:] # E
wgt_array[nums[1],1:,:-1] = self.array[:-1,1:] # NE
wgt_array[nums[2],1:,:] = self.array[:-1,:] # N
wgt_array[nums[3],1:,1:] = self.array[:-1,:-1] # NW
wgt_array[nums[4],:,1:] = self.array[:,:-1] # W
wgt_array[nums[5],:-1,1:] = self.array[1:,:-1] # SW
wgt_array[nums[6],:-1,:] = self.array[1:,:] # S
wgt_array[nums[7],:-1,:-1] = self.array[1:,1:] # SE
if self.fix_edges:
wgt_array[nums[0],:,-1] = wgt_array[nums[0],:,-2]
wgt_array[nums[1],:,-1] = wgt_array[nums[1],:,-2]
wgt_array[nums[7],:,-1] = wgt_array[nums[7],:,-2]
wgt_array[nums[1],0,:] = wgt_array[nums[1],1,:]
wgt_array[nums[2],0,:] = wgt_array[nums[2],1,:]
wgt_array[nums[3],0,:] = wgt_array[nums[3],1,:]
wgt_array[nums[3],:,0] = wgt_array[nums[3],:,1]
wgt_array[nums[4],:,0] = wgt_array[nums[4],:,1]
wgt_array[nums[5],:,0] = wgt_array[nums[5],:,1]
wgt_array[nums[5],-1,:] = wgt_array[nums[5],-2,:]
wgt_array[nums[6],-1,:] = wgt_array[nums[6],-2,:]
wgt_array[nums[7],-1,:] = wgt_array[nums[7],-2,:]
if self.normalize:
a_sum = np.sum(wgt_array, axis=0)
wgt_array[:,a_sum!=0] = wgt_array[:,a_sum!=0] / a_sum[a_sum!=0]
return wgt_array
def get_wet_mask_nh(self):
'''
Get np.array((8,L,W)) for each neighbor around a cell
with 1 if te neighbor is wet and 0 if dry
'''
wet_mask = (self.depth > self.dry_depth) * 1
wet_mask_nh = self.build_weight_array(wet_mask, fix_edges = True)
return wet_mask_nh
def get_wgt_sfc(self, wet_mask_nh):
'''
Get np.array((8,L,W)) (H - H_neighbor)/dist
for each neighbor around a cell
Takes an narray of the same size with 1 if wet and 0 if not
'''
wgt_sfc = self.build_weight_array(self.stage, fix_edges = True)
wgt_sfc = (self.stage - wgt_sfc) / np.array(self.dxn_dist)[:, np.newaxis, np.newaxis]
wgt_sfc = wgt_sfc * wet_mask_nh
wgt_sfc[wgt_sfc<0] = 0
wgt_sfc_sum = np.sum(wgt_sfc,axis=0)
wgt_sfc[:,wgt_sfc_sum>0] = wgt_sfc[:,wgt_sfc_sum>0] / wgt_sfc_sum[wgt_sfc_sum>0]
return wgt_sfc
def get_wgt_int(self, wet_mask_nh):
'''
Get np.array((8,L,W)) (qx*dxn_ivec + qy*dxn_jvec)/dist
for each neighbor around a cell
Takes an narray of the same size with 1 if wet and 0 if not
'''
wgt_int = (self.qx * np.array(self.dxn_ivec)[:,np.newaxis,np.newaxis] + \
self.qy * np.array(self.dxn_jvec)[:,np.newaxis,np.newaxis]) / \
np.array(self.dxn_dist)[:,np.newaxis,np.newaxis]
wgt_int[1:4,0,:] = 0
wgt_int = wgt_int * wet_mask_nh
wgt_int[wgt_int<0] = 0
wgt_int_sum = np.sum(wgt_int, axis=0)
wgt_int[:,wgt_int_sum>0] = wgt_int[:,wgt_int_sum>0]/wgt_int_sum[wgt_int_sum>0]
return wgt_int
    def get_wgt(self):
        '''
        Get np.array((8,L,W)) of the probabilities of flow
        between a cell and each of its neighbors
        If the probabilities are zero in all directions, they will
        be split equally among all wet neighbors
        '''
        wet_mask_nh = self.get_wet_mask_nh()
        wgt_sfc = self.get_wgt_sfc(wet_mask_nh)
        wgt_int = self.get_wgt_int(wet_mask_nh)

        # Blend the surface-slope and inertial components; gamma sets
        # the mix between the two.
        weight = self.gamma * wgt_sfc + (1-self.gamma) * wgt_int

        # Bias toward deeper neighbors via the theta_water exponent.
        wgt = self.build_weight_array(self.depth, fix_edges = True)
        wgt = wgt**self.theta_water * weight

        # Zero out dry cells, clip negatives, and normalize per cell
        # wherever some direction has positive weight.
        wet_mask = 1*(self.depth > self.dry_depth)
        wgt = wgt * wet_mask
        wgt[wgt<0] = 0
        wgt_sum = np.sum(wgt,axis=0)
        wgt[:,wgt_sum>0] = wgt[:,wgt_sum>0] / wgt_sum[wgt_sum>0]

        # give wet cells with zero wgt to all wet neighbors equal probs for each of them
        # wet cells with zero probabilities to all neighbors
        # (wgt_sum + (wet_mask-1)) == 0 selects cells that are wet
        # (wet_mask == 1) but whose total weight is zero.
        wet_mask = 1*(self.depth > self.dry_depth)
        wet_cells = np.where((wgt_sum + (wet_mask-1)) == 0)
        wet = [(wet_cells[0][i],wet_cells[1][i]) for i in range(len(wet_cells[0]))]

        # new weights to those cells - partitioned equally among the wet neighbors
        new_vals = [wet_mask_nh[:,i[0],i[1]]/sum(wet_mask_nh[:,i[0],i[1]]) for i in wet]
        for i in range(len(new_vals)):
            wgt[:,wet[i][0],wet[i][1]] = new_vals[i]

        # Block the north-facing directions along row 0 -- presumably
        # the inlet boundary; TODO confirm.
        wgt[1:4,0,:] = 0
        return wgt
def get_sed_weight(self):
'''
Get np.array((8,L,W)) of probability field of routing to neighbors
for sediment parcels
'''
wet_mask_nh = self.get_wet_mask_nh()
weight = self.get_wgt_int(wet_mask_nh) * \
self.depth**self.theta_sand * wet_mask_nh
weight[weight<0] = 0.
weight_sum = np.sum(weight,axis=0)
weight[:,weight_sum>0] = weight[:,weight_sum>0]/weight_sum[weight_sum>0]
weight_f = np.zeros((self.L*self.W,8))
for i in range(8):
weight_f[:,i] = weight[i,:,:].flatten()
return weight_f
#############################################
################# smoothing #################
#############################################
def smoothing_filter(self, stageTemp):
'''
Smooth water surface
If any of the cells in a 9-cell window are wet, apply this filter
stageTemp : water surface
stageT : smoothed water surface
'''
stageT = stageTemp.copy()
wet_mask = self.depth > self.dry_depth
for t in range(self.Nsmooth):
local_mean = ndimage.uniform_filter(stageT)
stageT[wet_mask] = self.Csmooth * stageT[wet_mask] + \
(1-self.Csmooth) * local_mean[wet_mask]
returnval = (1-self.omega_sfc) * self.stage + self.omega_sfc * stageT
return returnval
    def flooding_correction(self):
        '''
        Flood dry cells along the shore if necessary
        Check the neighbors of all dry cells. If any dry cells have wet neighbors
        Check that their stage is not higher than the bed elevation of the center cell
        If it is, flood the dry cell
        '''
        wet_mask = self.depth > self.dry_depth
        wet_mask_nh = self.get_wet_mask_nh()
        wet_mask_nh_sum = np.sum(wet_mask_nh, axis=0)
        # makes wet cells look like they have only dry neighbors
        wet_mask_nh_sum[wet_mask] = 0
        # indices of dry cells with wet neighbors
        shore_ind = np.where(wet_mask_nh_sum > 0)
        # (8, L, W) stack of each cell's neighbor stages
        stage_nhs = self.build_weight_array(self.stage)
        # bed elevation at each candidate shoreline cell
        eta_shore = self.eta[shore_ind]
        for i in range(len(shore_ind[0])):
            # pretends dry neighbor cells have stage zero so they cannot be > eta_shore[i]
            stage_nh = wet_mask_nh[:,shore_ind[0][i],shore_ind[1][i]] * \
                stage_nhs[:,shore_ind[0][i],shore_ind[1][i]]
            if (stage_nh > eta_shore[i]).any():
                # flood the dry cell up to its highest wet-neighbor stage
                self.stage[shore_ind[0][i],shore_ind[1][i]] = max(stage_nh)
def topo_diffusion(self):
'''
Diffuse topography after routing all coarse sediment parcels
'''
wgt_cell_type = self.build_weight_array(self.cell_type > -2)
wgt_qs = self.build_weight_array(self.qs) + self.qs
wet_mask_nh = self.get_wet_mask_nh()
multiplier = self.dt/self.N_crossdiff * self.alpha * 0.5 / self.dx**2
for n in range(self.N_crossdiff):
wgt_eta = self.build_weight_array(self.eta) - self.eta
crossflux_nb = multiplier * wgt_qs * wgt_eta
crossflux_nb = crossflux_nb * wet_mask_nh
crossflux = np.sum(crossflux_nb, axis=0)
self.eta = self.eta + crossflux
#############################################
################# updaters ##################
#############################################
def update_flow_field(self, timestep, iteration):
'''
Update water discharge after one water iteration
'''
dloc = (self.qxn**2 + self.qyn**2)**(0.5)
qwn_div = np.ones_like(self.qwn)
qwn_div[dloc>0] = self.qwn[dloc>0] / dloc[dloc>0]
self.qxn *= qwn_div
self.qyn *= qwn_div
if timestep > 0:
omega = self.omega_flow_iter
if iteration == 0: omega = self.omega_flow
self.qx = self.qxn*omega + self.qx*(1-omega)
self.qy = self.qyn*omega + self.qy*(1-omega)
else:
self.qx = self.qxn.copy(); self.qy = self.qyn.copy()
self.qw = (self.qx**2 + self.qy**2)**(0.5)
self.qx[0,self.inlet] = self.qw0
self.qy[0,self.inlet] = 0
self.qw[0,self.inlet] = self.qw0
def update_velocity_field(self):
'''
Update the flow velocity field after one water iteration
'''
mask = (self.depth > self.dry_depth) * (self.qw > 0)
self.uw[mask] = np.minimum(self.u_max, self.qw[mask] / self.depth[mask])
self.uw[~mask] = 0
self.ux[mask]= self.uw[mask] * self.qx[mask] / self.qw[mask]
self.ux[~mask] = 0
self.uy[mask]= self.uw[mask] * self.qy[mask] / self.qw[mask]
self.uy[~mask] = 0
#############################################
################# water flow ################
#############################################
def init_water_iteration(self):
wgt = self.get_wgt()
for i in range(8):
self.wgt_flat[:,i] = wgt[i,:,:].flatten()
self.qxn[:] = 0; self.qyn[:] = 0; self.qwn[:] = 0
self.indices = np.zeros((self.Np_water, self.itmax/2), dtype = np.int)
self.path_number = np.array(range(self.Np_water))
self.save_paths = []
    def run_water_iteration(self):
        '''
        Route all parcels of water in one iteration

        Each parcel starts at a random inlet cell and random-walks using
        the per-cell routing weights until it exits the domain, is caught
        looping, or itmax-1 steps elapse.  Visited cells accumulate the
        directional fluxes qxn/qyn and, afterwards, qwn from the visit
        counts.
        NOTE(review): relies on Python 2 semantics -- map() returning a
        list -- and on the deprecated np.int alias; verify before running
        under Python 3 / modern NumPy.
        '''
        these_indices = map(lambda x: self.random_pick_list(self.inlet), range(self.Np_water))
        these_indices = map(self.flatten_indices, these_indices)
        self.indices[:,0] = these_indices
        self.qxn.flat[these_indices] += 1
        water_continue = True
        it = 0
        while water_continue:
            # pick a neighbor direction for every active parcel
            ngh = map(self.random_pick, self.wgt_flat[these_indices])
            new_indices = these_indices + self.walk_flat[ngh]
            new_ind_type = self.cell_type.flat[new_indices]
            # save the path numbers of the ones that reached the edge
            if self.path_number[new_ind_type == -1].any():
                self.save_paths.append( list(self.path_number[new_ind_type == -1]) )
            # departure contribution of the step at the old cells
            walk_vals = self.walk[ngh]
            self.qxn.flat[these_indices] += walk_vals[:,0]
            self.qyn.flat[these_indices] += walk_vals[:,1]
            # arrival contribution at the new cells (including edge cells)
            walk_vals = self.walk[list( np.array(ngh)[new_ind_type >= -1] )]
            n_these_indices = new_indices[new_ind_type >= -1]
            n_path_number = self.path_number[new_ind_type >= -1]
            for i in range(len(n_these_indices)):
                self.qxn.flat[n_these_indices[i]] += walk_vals[i,0]
                self.qyn.flat[n_these_indices[i]] += walk_vals[i,1]
            it += 1
            self.indices[n_path_number,it] = n_these_indices
            # only parcels landing on ordinary cells (type >= 0) keep walking
            these_indices = new_indices[new_ind_type >= 0]
            self.path_number = self.path_number[new_ind_type >= 0]
            # check for looping
            if len(self.path_number)>0:
                keeper = np.ones((len(these_indices),), dtype=np.int)
                for i in range(len(these_indices)):
                    # drop a parcel if it revisits any cell of its own path
                    if np.in1d(self.indices[self.path_number[i],:it], these_indices[i]).any():
                        keeper[i] = 0
                    if these_indices[i]<0:
                        keeper[i] = 0
                if np.min(keeper)==0:
                    these_indices = these_indices[keeper == 1]
                    self.path_number = self.path_number[keeper == 1]
            if it == self.itmax-1 or len(these_indices)==0:
                water_continue = False
        # update qwn by counting the indices
        all_indices = self.indices.flatten()
        all_indices.sort()
        # skip the leading zeros (unused path slots)
        loc = np.where(all_indices>0)[0][0]
        ind_index_all = all_indices[loc:]
        ind_count = np.bincount(ind_index_all)
        ind_index = range(max(ind_index_all)+1)
        qwn_sum = ind_count[ind_index] * self.Qp_water/self.dx
        self.qwn.flat[ind_index] += qwn_sum
    def finalize_water_iteration(self, timestep, iteration):
        '''
        Finish updating flow fields
        Clean up at end of water iteration
        '''
        # order matters: flood shoreline cells, clip stage to sea level,
        # recompute depth, then derive discharge and velocity from them
        self.flooding_correction()
        self.stage = np.maximum(self.stage, self.H_SL)
        self.depth = np.maximum(self.stage - self.eta, 0)
        self.update_flow_field(timestep, iteration)
        self.update_velocity_field()
    def get_profiles(self):
        '''
        Calculate the water surface profiles after routing flow parcels
        Update water surface array

        Walks each saved parcel path backwards from its end, integrating
        the imposed slope S0 along the local flow direction, then
        averages the per-cell stage estimates and smooths the result.
        NOTE(review): relies on Python 2 semantics (zip() returning a
        list, dict.iteritems()); verify before porting to Python 3.
        '''
        paths_for_profile = [i for j in self.save_paths for i in j]
        assert len(paths_for_profile) == len(set(paths_for_profile)), "save_paths has repeats!"
        # get all the unique indices in good paths
        unique_cells = list(set([j for i in paths_for_profile for j in list(set(self.indices[i]))]))
        try:
            # index 0 is the fill value for unused path slots, not a real cell
            unique_cells.remove(0)
        except:
            pass
        unique_cells.sort()
        # extract the values needed for the paths -- no need to do this for the entire space
        uw_unique = self.uw.flat[unique_cells]
        depth_unique = self.depth.flat[unique_cells]
        ux_unique = self.ux.flat[unique_cells]
        uy_unique = self.uy.flat[unique_cells]
        # boolean add acts as a logical OR: fast flow or shallow water
        profile_mask = np.add(uw_unique > 0.5*self.u0, depth_unique < 0.1*self.h0)
        all_unique = zip(profile_mask,uw_unique,ux_unique,uy_unique)
        sfc_array = np.zeros((len(unique_cells),2))
        # make dictionaries to use as lookup tables
        lookup = {}
        self.sfc_change = {}
        for i in range(len(unique_cells)):
            lookup[unique_cells[i]] = all_unique[i]
            self.sfc_change[unique_cells[i]] = sfc_array[i]
        # process each profile
        for i in paths_for_profile:
            path = self.indices[i]
            path = path[np.where(path>0)]
            # NOTE(review): the comprehension variable shadows the outer
            # loop's i (harmless here since i is reassigned each pass,
            # but fragile)
            prf = [lookup[i][0] for i in path]
            # find the last True
            try:
                last_True = (len(prf) - 1) - prf[::-1].index(True)
                sub_path = path[:last_True]
                sub_path_unravel = np.unravel_index(sub_path, self.eta.shape)
                path_diff = np.diff(sub_path_unravel)
                ux_ = [lookup[i][2] for i in sub_path[:-1]]
                uy_ = [lookup[i][3] for i in sub_path[:-1]]
                uw_ = [lookup[i][1] for i in sub_path[:-1]]
                # slope S0 projected onto the step direction of the path
                dH = self.S0 * (ux_ * path_diff[0] + uy_ * path_diff[1]) * self.dx
                dH = [dH[i] / uw_[i] if uw_[i]>0 else 0 for i in range(len(dH))]
                dH.append(0)
                # integrate the stage increments backwards from the path end
                newH = np.zeros(len(sub_path))
                for i in range(-2,-len(sub_path)-1,-1):
                    newH[i] = newH[i+1] + dH[i]
                for i in range(len(sub_path)):
                    # accumulate [stage sum, sample count] per cell
                    self.sfc_change[sub_path[i]] += [newH[i],1]
            except:
                pass
        stageTemp = self.eta + self.depth
        # average the accumulated stage estimates per cell (sum / count)
        for k, v in self.sfc_change.iteritems():
            if np.max(v) > 0:
                stageTemp.flat[k] = v[0]/v[1]
        self.stage = self.smoothing_filter(stageTemp)
#############################################
################# sed flow ##################
#############################################
def init_sed_timestep(self):
'''
Set up arrays to start sed routing timestep
'''
self.qs[:] = 0
self.Vp_dep_sand[:] = 0
self.Vp_dep_mud[:] = 0
    def one_fine_timestep(self):
        '''
        Route all parcels of fine sediment

        Each mud parcel random-walks from the inlet following the
        sediment routing weights.  Where flow is slower than U_dep_mud a
        fraction of the parcel volume deposits; where it is faster than
        U_ero_mud the bed erodes into the parcel.  eta, depth and the
        velocity fields are updated in place as parcels move.
        NOTE(review): relies on Python 2 map() returning a list and on
        the deprecated np.int alias.
        '''
        self.num_fine = int(self.Np_sed - self.num_coarse)
        if self.num_fine>0:
            these_indices = map(lambda x: self.random_pick_list(self.inlet),range(self.num_fine))
            these_indices = map(self.flatten_indices,these_indices)
            self.indices = np.zeros((self.num_fine,self.itmax), dtype=np.int)
            self.indices[:,0] = these_indices
            path_number = np.array(range(self.num_fine))
            self.Vp_res = np.zeros((self.Np_sed,)) + self.Vp_sed
            # half the parcel volume contributes to qs at each end of a step
            self.qs.flat[these_indices] += self.Vp_res[path_number]/2/self.dt/self.dx
            sed_continue = True
            it = 0
            while sed_continue:
                weight = self.get_sed_weight()
                ngh = map(self.random_pick, weight[these_indices])
                new_indices = these_indices + self.walk_flat[ngh]
                new_ind_type = self.cell_type.flat[new_indices]
                self.qs.flat[these_indices] += self.Vp_res[path_number]/2/self.dt/self.dx
                self.qs.flat[new_indices] += self.Vp_res[path_number]/2/self.dt/self.dx
                # parcels landing on ordinary cells (type >= 0) keep walking
                these_indices = new_indices[new_ind_type >= 0]
                path_number = path_number[new_ind_type >= 0]
                if len(path_number)>0:
                    # check for looping
                    keeper = np.ones((len(these_indices),), dtype=np.int)
                    for i in range(len(these_indices)):
                        if np.in1d(self.indices[path_number[i],:], these_indices[i]).any():
                            keeper[i] = 0
                        if these_indices[i]<0:
                            keeper[i] = 0
                    if np.min(keeper)==0:
                        these_indices = these_indices[keeper == 1]
                        path_number = path_number[keeper == 1]
                # save to the master indices
                it += 1
                self.indices[path_number,it] = these_indices
                # deposition: flow slower than the mud deposition threshold
                if (self.uw.flat[these_indices] < self.U_dep_mud).any():
                    update_ind = these_indices[self.uw.flat[these_indices] < self.U_dep_mud]
                    update_path = path_number[self.uw.flat[these_indices] < self.U_dep_mud]
                    Vp_res_ = self.Vp_res[update_path]
                    Vp_res_ = self.sed_lag * Vp_res_ * (self.U_dep_mud**self.beta - self.uw.flat[update_ind]**self.beta) / (self.U_dep_mud**self.beta)
                    # cap the deposit at a quarter of the local water column
                    self.Vp_dep = (self.stage.flat[update_ind] - self.eta.flat[update_ind])/4 * self.dx**2
                    self.Vp_dep = np.array([min((Vp_res_[i],self.Vp_dep[i])) for i in range(len(self.Vp_dep))])
                    self.Vp_dep_mud.flat[update_ind] += self.Vp_dep
                    self.Vp_res[update_path] -= self.Vp_dep
                    self.eta.flat[update_ind] += self.Vp_dep / self.dx**2
                    self.depth.flat[update_ind] = self.stage.flat[update_ind] - self.eta.flat[update_ind]
                    update_uw = [min(self.u_max, self.qw.flat[i]/self.depth.flat[i]) for i in update_ind]
                    self.uw.flat[update_ind] = update_uw
                    update_uwqw = [self.uw.flat[i]/self.qw.flat[i] if self.qw.flat[i]>0 else 0 for i in update_ind]
                    self.ux.flat[update_ind] = self.qx.flat[update_ind] * update_uwqw
                    self.uy.flat[update_ind] = self.qy.flat[update_ind] * update_uwqw
                # erosion: flow faster than the mud erosion threshold
                if (self.uw.flat[these_indices] > self.U_ero_mud).any():
                    update_ind = these_indices[self.uw.flat[these_indices] > self.U_ero_mud]
                    update_path = path_number[self.uw.flat[these_indices] > self.U_ero_mud]
                    Vp_res_ = self.Vp_sed * (self.uw.flat[update_ind]**self.beta - self.U_ero_mud**self.beta) / (self.U_ero_mud**self.beta)
                    self.Vp_ero = (self.stage.flat[update_ind] - self.eta.flat[update_ind])/4 * self.dx**2
                    self.Vp_ero = np.array([min((Vp_res_[i],self.Vp_ero[i])) for i in range(len(self.Vp_ero))])
                    self.eta.flat[update_ind] -= self.Vp_ero / self.dx**2
                    self.depth.flat[update_ind] = self.stage.flat[update_ind] - self.eta.flat[update_ind]
                    update_uw = [min(self.u_max, self.qw.flat[i]/self.depth.flat[i]) for i in update_ind]
                    self.uw.flat[update_ind] = update_uw
                    update_uwqw = [self.uw.flat[i]/self.qw.flat[i] if self.qw.flat[i]>0 else 0 for i in update_ind]
                    self.ux.flat[update_ind] = self.qx.flat[update_ind] * update_uwqw
                    self.uy.flat[update_ind] = self.qy.flat[update_ind] * update_uwqw
                    self.Vp_res[update_path] += self.Vp_ero
                if it == self.itmax-1 or len(these_indices)==0:
                    sed_continue = False
    def one_coarse_timestep(self):
        '''
        Route all parcels of coarse sediment

        Sand parcels deposit where the local sand flux exceeds the
        transport capacity qs_cap, and erode where flux is below capacity
        while flow exceeds U_ero_sand.  Topography is cross-diffused
        after all parcels finish.
        NOTE(review): relies on Python 2 map() returning a list and on
        the deprecated np.int alias.
        '''
        self.num_coarse = int(round(self.Np_sed*self.f_bedload))
        if self.num_coarse>0:
            these_indices = map(lambda x: self.random_pick_list(self.inlet),range(self.num_coarse))
            these_indices = map(self.flatten_indices,these_indices)
            self.indices = np.zeros((self.num_coarse,self.itmax), dtype=np.int)
            self.indices[:,0] = these_indices
            path_number = np.array(range(self.num_coarse))
            self.Vp_res = np.zeros((self.Np_sed,)) + self.Vp_sed
            # half the parcel volume contributes to qs at each end of a step
            self.qs.flat[these_indices] += self.Vp_res[path_number]/2/self.dt/self.dx
            sed_continue = True
            it = 0
            while sed_continue:
                weight = self.get_sed_weight()
                ngh = map(self.random_pick, weight[these_indices])
                new_indices = these_indices + self.walk_flat[ngh]
                new_ind_type = self.cell_type.flat[new_indices]
                self.qs.flat[these_indices] += self.Vp_res[path_number]/2/self.dt/self.dx
                self.qs.flat[new_indices] += self.Vp_res[path_number]/2/self.dt/self.dx
                these_indices = new_indices[new_ind_type >= 0]
                path_number = path_number[new_ind_type >= 0]
                if len(path_number)>0:
                    # check for looping
                    keeper = np.ones((len(these_indices),), dtype=np.int)
                    for i in range(len(these_indices)):
                        if np.in1d(self.indices[path_number[i],:], these_indices[i]).any():
                            keeper[i] = 0
                        if these_indices[i]<0:
                            keeper[i] = 0
                    if np.min(keeper)==0:
                        these_indices = these_indices[keeper == 1]
                        path_number = path_number[keeper == 1]
                it += 1
                self.indices[path_number,it] = these_indices
                # transport capacity from the local flow speed
                qs_cap = self.qs0 * self.f_bedload/self.u0**self.beta * self.uw.flat[these_indices]**self.beta
                # deposition: sand flux above capacity
                if (self.qs.flat[these_indices] > qs_cap).any():
                    update_ind = these_indices[self.qs.flat[these_indices] > qs_cap]
                    update_path = path_number[self.qs.flat[these_indices] > qs_cap]
                    Vp_res_ = self.Vp_res[update_path]
                    # cap the deposit at a quarter of the local water column
                    self.Vp_dep = (self.stage.flat[update_ind] - self.eta.flat[update_ind])/4 * self.dx**2
                    self.Vp_dep = np.array([min((Vp_res_[i],self.Vp_dep[i])) for i in range(len(update_ind))])
                    eta_change = self.Vp_dep / self.dx**2
                    self.Vp_res[update_path] -= self.Vp_dep
                    self.Vp_dep_sand.flat[update_ind] += self.Vp_dep
                    self.eta.flat[update_ind] += eta_change
                    update_uw = [min(self.u_max, self.qw.flat[i]/self.depth.flat[i]) for i in update_ind]
                    self.uw.flat[update_ind] = update_uw
                    update_uwqw = [self.uw.flat[i]/self.qw.flat[i] if self.qw.flat[i]>0 else 0 for i in update_ind]
                    self.ux.flat[update_ind] = self.qx.flat[update_ind] * update_uwqw
                    self.uy.flat[update_ind] = self.qy.flat[update_ind] * update_uwqw
                # erosion: below capacity and flow above the sand threshold
                if ((self.qs.flat[these_indices] < qs_cap) * (self.uw.flat[these_indices] > self.U_ero_sand)).any():
                    update_ind = these_indices[(self.qs.flat[these_indices] < qs_cap) * (self.uw.flat[these_indices] > self.U_ero_sand)]
                    update_path = path_number[(self.qs.flat[these_indices] < qs_cap) * (self.uw.flat[these_indices] > self.U_ero_sand)]
                    Vp_res_ = self.Vp_sed * (self.uw.flat[update_ind]**self.beta - self.U_ero_sand**self.beta) / (self.U_ero_sand**self.beta)
                    Vp_ero_ = (self.stage.flat[update_ind] - self.eta.flat[update_ind])/4 * self.dx**2
                    self.Vp_ero = np.array([min((Vp_res_[i],Vp_ero_[i])) for i in range(len(update_ind))])
                    self.eta.flat[update_ind] -= self.Vp_ero / self.dx**2
                    self.depth.flat[update_ind] = self.stage.flat[update_ind] - self.eta.flat[update_ind]
                    update_uw = [min(self.u_max, self.qw.flat[i]/self.depth.flat[i]) for i in update_ind]
                    self.uw.flat[update_ind] = update_uw
                    update_uwqw = [self.uw.flat[i]/self.qw.flat[i] if self.qw.flat[i]>0 else 0 for i in update_ind]
                    self.ux.flat[update_ind] = self.qx.flat[update_ind] * update_uwqw
                    self.uy.flat[update_ind] = self.qy.flat[update_ind] * update_uwqw
                    self.Vp_res[update_path] += self.Vp_ero
                if it == self.itmax-1 or len(these_indices)==0:
                    sed_continue = False
        self.topo_diffusion()
def finalize_sed_timestep(self):
'''
Clean up after sediment routing
Update sea level if baselevel changes
'''
self.flooding_correction()
self.stage = np.maximum(self.stage, self.H_SL)
self.depth = np.maximum(self.stage-self.eta, 0)
self.eta[0,self.inlet] = self.stage[0,self.inlet] - self.h0
self.depth[0,self.inlet] = self.h0
self.H_SL = self.H_SL + self.SLR * self.dt
#############################################
############## initialization ###############
#############################################
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
    def import_file(self):
        '''
        Parse the input file and set the corresponding instance variables.

        Lines have the form "name : value" or "name = value"; values are
        coerced according to the declared type ('string', 'float',
        'long', 'choice').  Unknown names and unrecognized choice values
        print a console alert.  (Python 2 print statements.)
        '''
        if self.verbose: print 'Reading input file...'
        self.input_file_vars = dict()
        numvars = 0
        o = open(self.input_file, mode = 'r')
        for line in o:
            # strip trailing whitespace and leading ':'/' ' characters
            line = re.sub('\s$','',line)
            line = re.sub('\A[: :]*','',line)
            # split "name : value" / "name = value" into [name, value]
            ln = re.split('\s*[\:\=]\s*', line)
            if len(ln)>1:
                ln[0] = string.lower(ln[0])
                if ln[0] in self._input_var_names:
                    numvars += 1
                    var_type = self._var_type_map[ln[0]]
                    # strip trailing ':'/' ' characters from the value
                    ln[1] = re.sub('[: :]+$','',ln[1])
                    if var_type == 'string':
                        self.input_file_vars[str(ln[0])] = str(ln[1])
                    if var_type == 'float':
                        self.input_file_vars[str(ln[0])] = float(ln[1])
                    if var_type == 'long':
                        self.input_file_vars[str(ln[0])] = int(ln[1])
                    if var_type == 'choice':
                        # yes/no and true/false (case-insensitive) -> bool
                        ln[1] = string.lower(ln[1])
                        if ln[1] == 'yes' or ln[1] == 'true':
                            self.input_file_vars[str(ln[0])] = True
                        elif ln[1] == 'no' or ln[1] == 'false':
                            self.input_file_vars[str(ln[0])] = False
                        else:
                            print "Alert! The option for the 'choice' type variable " \
                                  "in the input file '" + str(ln[0]) + "' is unrecognized. " \
                                  "Please use only Yes/No or True/False as values.\n"
                else:
                    print "Alert! The input file contains an unknown entry. The variable '" \
                          + str(ln[0]) + "' is not an input variable for this model. Check " \
                          " the spelling of the variable name and only use the symbols : and = " \
                          "in variable assignments.\n"
        o.close()
        # install every parsed value under its internal attribute name
        for k,v in self.input_file_vars.items():
            setattr(self, self.get_var_name(k), v)
        if self.verbose: print 'Finished reading ' + str(numvars) + ' variables from input file.'
def set_defaults(self):
self.random_pick = random_pick
self.random_pick_list = random_pick_list
self.save_figure = save_figure
for k,v in self._var_default_map.items():
setattr(self, self._var_name_map[k], v)
def create_dicts(self):
self._input_var_names = self._input_vars.keys()
self._var_type_map = dict()
self._var_name_map = dict()
self._var_default_map = dict()
for k in self._input_vars.keys():
self._var_type_map[k] = self._input_vars[k]['type']
self._var_name_map[k] = self._input_vars[k]['name']
self._var_default_map[k] = self._input_vars[k]['default']
def set_constants(self):
self.g = 9.81 # (gravitation const.)
self.dxn_iwalk = [1,1,0,-1,-1,-1,0,1]
self.dxn_jwalk = [0,1,1,1,0,-1,-1,-1]
self.dxn_dist = \
[sqrt(self.dxn_iwalk[i]**2 + self.dxn_jwalk[i]**2) for i in range(8)]
SQ05 = sqrt(0.5)
self.dxn_ivec = [0,-SQ05,-1,-SQ05,0,SQ05,1,SQ05]
self.dxn_jvec = [1,SQ05,0,-SQ05,-1,-SQ05,0,SQ05]
self.walk_flat = np.array([1, -49, -50, -51, -1, 49, 50, 51])
self.walk = np.array([[0,1], [-SQ05, SQ05], [-1,0], [-SQ05,-SQ05],
[0,-1], [SQ05,-SQ05], [1,0], [SQ05,SQ05]])
def create_other_variables(self):
self.set_constants()
self.theta_sand = self.coeff_theta_sand * self.theta_water
self.theta_mud = self.coeff_theta_mud * self.theta_water
self.U_dep_mud = self.coeff_U_dep_mud * self.u0
self.U_ero_sand = self.coeff_U_ero_sand * self.u0
self.U_ero_mud = self.coeff_U_ero_mud * self.u0
self.L0 = int(round(self.L0_meters / self.dx))
self.N0 = max(3,int(round(self.N0_meters / self.dx)))
self.L = int(round(self.Length/self.dx)) # num cells in x
self.W = int(round(self.Width/self.dx)) # num cells in y
self.u_max = 2.0 * self.u0 # maximum allowed flow velocity
self.C0 = self.C0_percent * 1/100 # sediment concentration
# (m) critial depth to switch to "dry" node
self.dry_depth = min(0.1, 0.1*self.h0)
self.CTR = floor(self.W/2)
self.gamma = self.g * self.S0 * self.dx / (self.u0**2)
self.V0 = self.h0 * (self.dx**2) # (m^3) reference volume (volume to
# fill cell to characteristic depth)
self.Qw0 = self.u0 * self.h0 * self.N0 * self.dx # const discharge
# at inlet
self.qw0 = self.u0 * self.h0 # water unit input discharge
self.Qp_water = self.Qw0 / self.Np_water # volume each water parcel
self.qs0 = self.qw0 * self.C0 # sed unit discharge
self.dVs = 0.1 * self.N0**2 * self.V0 # total amount of sed added
# to domain per timestep
self.Qs0 = self.Qw0 * self.C0 # sediment total input discharge
self.Vp_sed = self.dVs / self.Np_sed # volume of each sediment parcel
self.itmax = 2 * (self.L + self.W) # max number of jumps for parcel
self.dt = self.dVs / self.Qs0 # time step size
self.omega_flow_iter = 2 / self.itermax
# number of times to repeat topo diffusion
self.N_crossdiff = int(round(self.dVs / self.V0))
# self.prefix
self.prefix = self.out_dir
if self.site_prefix:
self.prefix += self.site_prefix + '_'
if self.case_prefix:
self.prefix += self.case_prefix + '_'
    def create_domain(self):
        '''
        Creates the model domain

        Allocates all field arrays and paints the initial cell types:
        -1 = open-ocean edge ("out"), 1 = inlet channel, 2 = land
        (reset to -2 at the end), 0 = interior basin.
        '''
        ##### empty arrays #####
        x, y = np.meshgrid(np.arange(0,self.W), np.arange(0,self.L))
        self.cell_type = np.zeros_like(x)
        # eta stored as float32; all other fields inherit that dtype
        self.eta = np.zeros_like(x).astype(np.float32, copy=False)
        self.stage = np.zeros_like(self.eta)
        self.depth = np.zeros_like(self.eta)
        self.qx = np.zeros_like(self.eta)
        self.qy = np.zeros_like(self.eta)
        self.qxn = np.zeros_like(self.eta)
        self.qyn = np.zeros_like(self.eta)
        self.qwn = np.zeros_like(self.eta)
        self.ux = np.zeros_like(self.eta)
        self.uy = np.zeros_like(self.eta)
        self.uw = np.zeros_like(self.eta)
        self.wgt_flat = np.zeros((self.L*self.W,8))
        self.qs = np.zeros_like(self.eta)
        self.Vp_dep_sand = np.zeros_like(self.eta)
        self.Vp_dep_mud = np.zeros_like(self.eta)
        ##### domain #####
        # cells beyond radius L-5 from the apex (row 3, center column)
        # are the open boundary
        self.cell_type[((y-3)**2 + (x-self.CTR)**2)**(0.5) > self.L-5] = -1 # out
        self.cell_type[:self.L0,:] = 2 # land
        channel_inds = int(self.CTR-round(self.N0/2))
        self.cell_type[:self.L0,channel_inds:channel_inds+self.N0] = 1 # channel
        # initial stage ramps down from the land edge with slope S0
        self.stage = (self.L0-y-1) * self.dx * self.S0
        self.stage[self.cell_type <= 0] = 0.
        self.depth[self.cell_type <= 0] = self.h0
        self.depth[self.cell_type == 1] = self.h0
        self.qx[self.cell_type == 1] = self.qw0
        self.qx[self.cell_type <= 0] = self.qw0 / 5.
        self.qw = (self.qx**2 + self.qy**2)**(0.5)
        # initial velocities from discharge / depth on wet cells
        self.ux[self.depth>0] = self.qx[self.depth>0] / self.depth[self.depth>0]
        self.uy[self.depth>0] = self.qy[self.depth>0] / self.depth[self.depth>0]
        self.uw[self.depth>0] = self.qw[self.depth>0] / self.depth[self.depth>0]
        self.cell_type[self.cell_type == 2] = -2 # reset the land cell_type to -2
        # columns of the channel cells in the top row define the inlet
        self.inlet = list(np.unique(np.where(self.cell_type == 1)[1]))
        self.eta = self.stage - self.depth
| |
"""Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
version = 1, 1, 6
pyver = float('%s.%s' % _sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
try:
basestring
except NameError:
# In Python 2 basestring is the ancestor of both str and unicode
# in Python 3 it's just str, but was missing in 3.1
basestring = str
try:
unicode
except NameError:
# In Python 3 unicode no longer exists (it's just str)
unicode = str
class _RouteClassAttributeToGetattr(object):
    """Route attribute access on a class to __getattr__.

    This is a descriptor, used to define attributes that act differently
    when accessed through an instance and through a class.  Instance
    access remains normal, but access through the class raises
    AttributeError, which hands the lookup to the class's __getattr__.
    """

    def __init__(self, fget=None):
        self.fget = fget

    def __get__(self, obj, ownerclass=None):
        # class-level access: defer to __getattr__ on the (meta)class
        if obj is None:
            raise AttributeError()
        return self.fget(obj)

    def __set__(self, obj, value):
        raise AttributeError("can't set attribute")

    def __delete__(self, obj):
        raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
    """Returns True if obj is a descriptor, False otherwise."""
    # any object implementing part of the descriptor protocol counts
    return any(hasattr(obj, slot) for slot in ('__get__', '__set__', '__delete__'))
def _is_dunder(name):
    """Returns True if a __dunder__ name, False otherwise."""
    if len(name) <= 4:
        return False
    # double underscore at both ends, but not a third one adjacent
    return (name.startswith('__') and name.endswith('__')
            and name[2] != '_' and name[-3] != '_')
def _is_sunder(name):
    """Returns True if a _sunder_ name, False otherwise."""
    if len(name) <= 2:
        return False
    # exactly one underscore at each end
    return (name[0] == '_' and name[-1] == '_'
            and name[1] != '_' and name[-2] != '_')
def _make_class_unpicklable(cls):
    """Make the given class un-picklable."""
    def _refuse_pickle(self, protocol=None):
        raise TypeError('%r cannot be pickled' % self)
    # pickle prefers __reduce_ex__, so sabotaging it covers all protocols
    cls.__reduce_ex__ = _refuse_pickle
    cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.
    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # candidate member names in definition order
        self._member_names = []
    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.
        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).
        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.
        Single underscore (sunder) names are reserved.
        Note: in 3.x __order__ is simply discarded as a not necessary piece
        leftover from 2.x
        """
        # Py3 discards ordering keys entirely; Py2 canonicalizes
        # __order__ to _order_
        if pyver >= 3.0 and key in ('_order_', '__order__'):
            return
        elif key == '__order__':
            key = '_order_'
        if _is_sunder(key):
            if key != '_order_':
                raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist. This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
    @classmethod
    def __prepare__(metacls, cls, bases):
        # give the class body an _EnumDict namespace so member definition
        # order is recorded and duplicate member names are rejected
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        """Build the Enum class: realize the collected names as members,
        wire up the name/value lookup maps, and patch pickling and
        comparison behavior for any mixed-in member type."""
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        if type(classdict) is dict:
            # a plain dict can arrive on Python 2 (no __prepare__);
            # replay it through _EnumDict to collect member names
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                        first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]
        # py2 support for definition order
        _order_ = classdict.get('_order_')
        if _order_ is None:
            if pyver < 3.0:
                # no explicit order on Py2: sort by value, falling back
                # to sorting by name when values aren't comparable
                try:
                    _order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
                except TypeError:
                    _order_ = [name for name in sorted(members.keys())]
            else:
                _order_ = classdict._member_names
        else:
            del classdict['_order_']
            if pyver < 3.0:
                _order_ = _order_.replace(',', ' ').split()
                aliases = [name for name in members if name not in _order_]
                _order_ += aliases
        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))
        # save attributes from super classes so we know if we can take
        # the shortcut of storing members in the class dict
        base_attributes = set([a for b in bases for a in b.__dict__])
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in random order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->value map
        enum_class._member_type_ = member_type
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in _order_:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )     # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
            if member_name not in base_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail. Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly. We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                        '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)
        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class
def __bool__(cls):
"""
classes/types should always be True.
"""
return True
def __call__(cls, value, names=None, module=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API: `module`, if set, will be stored in
the new class' __module__ attribute; `type`, if set, will be mixed in
as the first base class.
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, type=type, start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member.name in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super(EnumMeta, cls).__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a copy of the internal mapping.
"""
return cls._member_map_.copy()
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name)
    def __getitem__(cls, name):
        # mapping-style access, e.g. Color['red']; raises KeyError if absent
        return cls._member_map_[name]
    def __iter__(cls):
        # iterate canonical members in definition order (aliases excluded)
        return (cls._member_map_[name] for name in cls._member_names_)
    def __reversed__(cls):
        # like __iter__, but in reverse definition order
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
    def __len__(cls):
        # number of canonical members; aliases are not counted
        return len(cls._member_names_)
__nonzero__ = __bool__
    def __repr__(cls):
        # e.g. <enum 'Color'>
        return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super(EnumMeta, cls).__setattr__(name, value)
    def _create_(cls, class_name, names=None, module=None, type=None, start=1):
        """Convenience method to create a new Enum class (functional API).

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are auto-numbered from `start`.
        * An iterable of member names.  Values are auto-numbered from `start`.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.
        """
        if pyver < 3.0:
            # if class_name is unicode, attempt a conversion to ASCII
            if isinstance(class_name, unicode):
                try:
                    class_name = class_name.encode('ascii')
                except UnicodeEncodeError:
                    raise TypeError('%r is not representable in ASCII' % class_name)
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        _order_ = []
        # special processing needed for names?
        if isinstance(names, basestring):
            # "red, green blue" -> ['red', 'green', 'blue']
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            # bare names -> (name, value) pairs auto-numbered from `start`
            names = [(e, i+start) for (i, e) in enumerate(names)]
        # Here, names is either an iterable of (name, value) or a mapping.
        item = None  # in case names is empty
        for item in names:
            if isinstance(item, basestring):
                # mapping input: item is the key, look up its value
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            _order_.append(member_name)
        # only set _order_ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['_order_'] = ' '.join(_order_)
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            # calling module unknown -- pickling of this class cannot work
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            # bootstrapping: while Enum itself is being built there is
            # nothing to mix in yet
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                            "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]  # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    # _find_new_ differs between Python 2 and 3 because staticmethod
    # objects and unbound-method semantics differ between the two.
    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True  # __new__, save_new, use_args
            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None
            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################

# accumulator for the eventual Enum class namespace; each method below is
# defined, stored here, then deleted from module scope
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
    """Return the member of `cls` whose value equals `value`."""
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
        #return value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # not there, now do long search -- O(n) behavior
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    # e.g. <Color.red: 1>
    return "<%s.%s: %r>" % (
            self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__

def __str__(self):
    # e.g. Color.red
    return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
if pyver >= 3.0:
    def __dir__(self):
        # member dir(): the standard attributes plus any public behavior
        # added anywhere on the MRO that is not itself a member name
        added_behavior = [
                m
                for cls in self.__class__.mro()
                for m in cls.__dict__
                if m[0] != '_' and m not in self._member_map_
                ]
        return (['__class__', '__doc__', '__module__', ] + added_behavior)
    temp_enum_dict['__dir__'] = __dir__
    del __dir__
def __format__(self, format_spec):
    """Format the member; mixed-in enums delegate to the mixed-in type."""
    # mixed-in Enums should use the mixed-in type's __format__, otherwise
    # we can get strange results with the Enum name showing up instead of
    # the value
    # pure Enum branch
    if self._member_type_ is object:
        cls = str
        val = str(self)
    # mix-in branch
    else:
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__

if pyver < 2.6:

    def __cmp__(self, other):
        """Python <2.6 comparison hook.

        Members of the same enum are equal only on identity; comparisons
        with anything else return NotImplemented.
        """
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
        # BUGFIX: removed an unreachable `raise TypeError(...)` that
        # followed the `return NotImplemented` above -- dead code.
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__

else:

    # Python 2.6+/3: members are unorderable -- every ordering operator
    # raises TypeError, matching Python 3 semantics for mixed types.
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__

    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__

    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__

    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    # members are singletons: equality within the same enum is identity
    if type(other) is self.__class__:
        return self is other
    return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__

def __ne__(self, other):
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__

def __hash__(self):
    # hash by name so members stay usable as dict/set keys even when
    # their values are unhashable
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__

def __reduce_ex__(self, proto):
    # pickle members by value; unpickling re-enters EnumMeta.__call__
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.

@_RouteClassAttributeToGetattr
def name(self):
    # canonical name of this member
    return self._name_
temp_enum_dict['name'] = name
del name

@_RouteClassAttributeToGetattr
def value(self):
    # the value this member was defined with
    return self._value_
temp_enum_dict['value'] = value
del value
@classmethod
def _convert(cls, name, module, filter, source=None):
    """
    Create a new Enum subclass that replaces a collection of global constants
    """
    # convert all constants from source (or module) that pass filter() to
    # a new Enum called name, and export the enum and its members back to
    # module;
    # also, replace the __reduce_ex__ method so unpickling works in
    # previous Python versions
    module_globals = vars(_sys.modules[module])
    if source:
        source = vars(source)
    else:
        source = module_globals
    # NOTE: `filter` shadows the builtin -- kept for API compatibility
    members = dict((name, value) for name, value in source.items() if filter(name))
    cls = cls(name, members, module=module)
    # members of converted enums pickle by name, not by value
    cls.__reduce_ex__ = _reduce_ex_by_name
    # export the members (and the enum itself) back into the module
    module_globals.update(cls.__members__)
    module_globals[name] = cls
    return cls
temp_enum_dict['_convert'] = _convert
del _convert
# build the Enum base class from the accumulated namespace
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict

# Enum has now been created
###########################

class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""

def _reduce_ex_by_name(self, proto):
    # pickle replacement installed by _convert: pickle members by name
    return self.name
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration.

    An alias is any entry in __members__ whose key differs from the
    member's canonical name.  If any aliases are found, raise ValueError
    listing every `alias -> canonical` pair; otherwise return the
    enumeration unchanged.
    """
    # idiom: build the alias list with a comprehension instead of a
    # manual append loop
    duplicates = [
        (name, member.name)
        for name, member in enumeration.__members__.items()
        if name != member.name
    ]
    if duplicates:
        duplicate_names = ', '.join(
            ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
        )
        raise ValueError('duplicate names found in %r: %s' %
                         (enumeration, duplicate_names)
                         )
    return enumeration
| |
"""
Support for Logitech UE Smart Radios.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.ue_smart_radio/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
MediaPlayerDevice)
from homeassistant.const import (
CONF_PASSWORD, CONF_USERNAME, STATE_IDLE, STATE_OFF, STATE_PAUSED,
STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

ICON = 'mdi:radio'
# JSON-RPC endpoint of the Logitech cloud service
URL = 'http://decibel.logitechmusic.com/jsonrpc.js'

# feature bitmask advertised to Home Assistant for this player
SUPPORT_UE_SMART_RADIO = SUPPORT_PLAY | SUPPORT_PAUSE | SUPPORT_STOP | \
    SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_TURN_ON | \
    SUPPORT_TURN_OFF | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE

# map the radio's reported playback mode onto Home Assistant states
PLAYBACK_DICT = {
    'play': STATE_PLAYING,
    'pause': STATE_PAUSED,
    'stop': STATE_IDLE,
}

# platform configuration: the uesmartradio.com account credentials
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
})
def send_request(payload, session):
    """Send request to radio.

    Returns the decoded JSON response, or None when the request timed
    out or could not connect (the failure is logged).
    """
    cookies = {"sdi_squeezenetwork_session": session}
    try:
        response = requests.post(URL, cookies=cookies, json=payload,
                                 timeout=5)
    except requests.exceptions.Timeout:
        _LOGGER.error("Timed out when sending request")
        return None
    except requests.exceptions.ConnectionError:
        _LOGGER.error("An error occurred while connecting")
        return None
    return response.json()
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Logitech UE Smart Radio platform."""
    # log in to the Logitech cloud to obtain a session cookie
    login = requests.post(
        "https://www.uesmartradio.com/user/login",
        data={"email": config.get(CONF_USERNAME),
              "password": config.get(CONF_PASSWORD)},
        timeout=5)
    session = login.cookies["sdi_squeezenetwork_session"]

    # query the server status for the first registered player
    status = send_request({"params": ["", ["serverstatus"]]}, session)
    player = status["result"]["players_loop"][0]

    add_entities([UERadioDevice(session, player["playerid"], player["name"])])
class UERadioDevice(MediaPlayerDevice):
    """Representation of a Logitech UE Smart Radio device.

    State is refreshed by update(); the command helpers only send
    requests and rely on the next poll to observe their effect.
    """

    def __init__(self, session, player_id, player_name):
        """Initialize the Logitech UE Smart Radio device."""
        self._session = session
        self._player_id = player_id
        self._name = player_name
        self._state = None
        self._volume = 0
        # volume level remembered while muted, so unmute can restore it
        self._last_volume = 0
        self._media_title = None
        self._media_artist = None
        self._media_artwork_url = None

    def send_command(self, command):
        """Send command to radio."""
        send_request({"method": "slim.request", "params":
                      [self._player_id, command]}, self._session)

    def update(self):
        """Get the latest details from the device."""
        request = send_request({
            "method": "slim.request", "params":
            [self._player_id, ["status", "-", 1,
                               "tags:cgABbehldiqtyrSuoKLN"]]}, self._session)

        if request["error"] is not None:
            self._state = None
            return

        if request["result"]["power"] == 0:
            self._state = STATE_OFF
        else:
            self._state = PLAYBACK_DICT[request["result"]["mode"]]
            media_info = request["result"]["playlist_loop"][0]
            # the device reports volume as 0..100; HA expects 0..1
            self._volume = request["result"]["mixer volume"] / 100
            self._media_artwork_url = media_info["artwork_url"]
            self._media_title = media_info["title"]
            if "artist" in media_info:
                self._media_artist = media_info["artist"]
            else:
                # streams carry the station name in remote_title instead
                self._media_artist = media_info.get("remote_title")

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        # fixed: direct boolean expression instead of
        # `True if ... else False`; the radio reports volume 0 when muted
        return self._volume <= 0

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume

    @property
    def supported_features(self):
        """Flag of features that are supported."""
        return SUPPORT_UE_SMART_RADIO

    @property
    def media_content_type(self):
        """Return the media content type."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_image_url(self):
        """Image URL of current playing media."""
        return self._media_artwork_url

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        return self._media_artist

    @property
    def media_title(self):
        """Title of current playing media."""
        return self._media_title

    def turn_on(self):
        """Turn on specified media player or all."""
        self.send_command(["power", 1])

    def turn_off(self):
        """Turn off specified media player or all."""
        self.send_command(["power", 0])

    def media_play(self):
        """Send the media player the command for play."""
        self.send_command(["play"])

    def media_pause(self):
        """Send the media player the command for pause."""
        self.send_command(["pause"])

    def media_stop(self):
        """Send the media player the stop command."""
        self.send_command(["stop"])

    def media_previous_track(self):
        """Send the media player the command for prev track."""
        self.send_command(["button", "rew"])

    def media_next_track(self):
        """Send the media player the command for next track."""
        self.send_command(["button", "fwd"])

    def mute_volume(self, mute):
        """Send mute command."""
        if mute:
            # remember the current level so unmute can restore it
            self._last_volume = self._volume
            self.send_command(["mixer", "volume", 0])
        else:
            self.send_command(["mixer", "volume", self._last_volume * 100])

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self.send_command(["mixer", "volume", volume * 100])
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
import Queue
import re
import contextlib
import mock
from neutron import context
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.openstack.common import jsonutils as json
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.radware import driver
from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
# resources the mocked GET handler answers with a plain 200 response
GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
class QueueMock(Queue.Queue):
    """Synchronous stand-in for the driver's operation queue.

    Instead of queueing for a background thread, every put_nowait()
    immediately invokes the completion handler with the operation.
    """

    def __init__(self, completion_handler):
        self.completion_handler = completion_handler
        super(QueueMock, self).__init__()

    def put_nowait(self, oper):
        self.completion_handler(oper)
def rest_call_function_mock(action, resource, data, headers, binary=False):
    """Fake REST entry point used as the driver's rest_client.call.

    Behavior flags (RESPOND_WITH_ERROR, TEMPLATES_MISSING) are stored as
    attributes on this function by the tests.
    """
    if rest_call_function_mock.RESPOND_WITH_ERROR:
        return 400, 'error_status', 'error_description', None

    if action == 'GET':
        return _get_handler(resource)
    if action == 'DELETE':
        return _delete_handler(resource)
    if action == 'POST':
        return _post_handler(resource, binary)
    return 0, None, None, None
def _get_handler(resource):
    """Mocked GET: template listing, known 200 resources, or 202 status."""
    if resource == GET_200[2]:
        # workflow-template listing, possibly empty
        if rest_call_function_mock.TEMPLATES_MISSING:
            templates = json.loads('[]')
        else:
            templates = json.loads(
                '[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
            )
        return 200, '', '', templates

    if resource in GET_200:
        return 200, '', '', ''

    # anything else is treated as a completed async operation
    return 202, '', '', json.loads('{"complete":"True", "success": "True"}')
def _delete_handler(resource):
    # mocked DELETE: report every resource as already gone
    return 404, '', '', {'message': 'Not Found'}
def _post_handler(resource, binary):
    """Mocked POST: workflow actions, service creation, or uploads."""
    if re.search(r'/api/workflow/.+/action/.+', resource):
        # workflow action -- accepted for async processing
        return 202, '', '', json.loads('{"uri":"some_uri"}')
    if re.search(r'/api/service\?name=.+', resource):
        # service creation -- return the provisioning link
        return 201, '', '', json.loads(
            '{"links":{"actions":{"provision":"someuri"}}}')
    if binary:
        return 201, '', '', ''
    return 202, '', '', ''
# service-provider spec registering the Radware driver as the default
# LOADBALANCER provider for these tests
RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron.services.'
                    'loadbalancer.drivers.radware.driver.'
                    'LoadBalancerDriver:default')
class TestLoadBalancerPluginBase(
    test_db_loadbalancer.LoadBalancerPluginDbTestCase):
    """Base case that loads the LBaaS plugin with Radware as provider."""

    def setUp(self):
        super(TestLoadBalancerPluginBase, self).setUp(
            lbaas_provider=RADWARE_PROVIDER)

        # keep a handle on the loaded LBaaS plugin for the tests
        loaded_plugins = manager.NeutronManager().get_service_plugins()
        self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
    def setUp(self):
        """Replace the Radware driver's REST client and queue with mocks."""
        super(TestLoadBalancerPlugin, self).setUp()

        # reset the behavior flags consulted by rest_call_function_mock
        rest_call_function_mock.__dict__.update(
            {'RESPOND_WITH_ERROR': False})
        rest_call_function_mock.__dict__.update(
            {'TEMPLATES_MISSING': False})

        self.operation_completer_start_mock = mock.Mock(
            return_value=None)
        self.operation_completer_join_mock = mock.Mock(
            return_value=None)
        self.driver_rest_call_mock = mock.Mock(
            side_effect=rest_call_function_mock)

        # wire the mocks into the loaded driver so no real REST calls or
        # completion threads are used
        radware_driver = self.plugin_instance.drivers['radware']
        radware_driver.completion_handler.start = (
            self.operation_completer_start_mock)
        radware_driver.completion_handler.join = (
            self.operation_completer_join_mock)
        radware_driver.rest_client.call = self.driver_rest_call_mock
        radware_driver.completion_handler.rest_client.call = (
            self.driver_rest_call_mock)
        # run completions synchronously through QueueMock
        radware_driver.queue = QueueMock(
            radware_driver.completion_handler.handle_operation_completion)

        self.addCleanup(radware_driver.completion_handler.join)
    def test_verify_workflow_templates(self):
        """Test the rest call failure handling by Exception raising."""
        # with the workflow templates reported missing, template
        # verification must raise WorkflowMissing
        rest_call_function_mock.__dict__.update(
            {'TEMPLATES_MISSING': True})

        self.assertRaises(r_exc.WorkflowMissing,
                          self.plugin_instance.drivers['radware'].
                          _verify_workflow_templates)
    def test_create_vip_failure(self):
        """Test the rest call failure handling by Exception raising."""
        with self.network(do_delete=False) as network:
            with self.subnet(network=network, do_delete=False) as subnet:
                with self.pool(no_delete=True,
                               provider='radware',
                               subnet_id=subnet['subnet']['id']) as pool:
                    vip_data = {
                        'name': 'vip1',
                        'subnet_id': subnet['subnet']['id'],
                        'pool_id': pool['pool']['id'],
                        'description': '',
                        'protocol_port': 80,
                        'protocol': 'HTTP',
                        'connection_limit': -1,
                        'admin_state_up': True,
                        'status': constants.PENDING_CREATE,
                        'tenant_id': self._tenant_id,
                        'session_persistence': ''
                    }

                    # make every mocked REST call fail so the driver takes
                    # its error path
                    rest_call_function_mock.__dict__.update(
                        {'RESPOND_WITH_ERROR': True})

                    self.assertRaises(r_exc.RESTRequestFailure,
                                      self.plugin_instance.create_vip,
                                      context.get_admin_context(),
                                      {'vip': vip_data})
    def test_create_vip(self):
        """Create a VIP; verify the driver's REST calls and final status."""
        with self.subnet() as subnet:
            with self.pool(provider='radware',
                           subnet_id=subnet['subnet']['id']) as pool:
                vip_data = {
                    'name': 'vip1',
                    'subnet_id': subnet['subnet']['id'],
                    'pool_id': pool['pool']['id'],
                    'description': '',
                    'protocol_port': 80,
                    'protocol': 'HTTP',
                    'connection_limit': -1,
                    'admin_state_up': True,
                    'status': constants.PENDING_CREATE,
                    'tenant_id': self._tenant_id,
                    'session_persistence': ''
                }

                vip = self.plugin_instance.create_vip(
                    context.get_admin_context(), {'vip': vip_data})

                # Test creation REST calls
                calls = [
                    mock.call('GET', u'/api/service/srv_' +
                              subnet['subnet']['network_id'], None, None),
                    mock.call('POST', u'/api/service?name=srv_' +
                              subnet['subnet']['network_id'] + '&tenant=' +
                              vip['tenant_id'], mock.ANY,
                              driver.CREATE_SERVICE_HEADER),
                    mock.call('GET', u'/api/workflow/l2_l3_' +
                              subnet['subnet']['network_id'], None, None),
                    mock.call('POST', '/api/workflow/l2_l3_' +
                              subnet['subnet']['network_id'] +
                              '/action/setup_l2_l3',
                              mock.ANY, driver.TEMPLATE_HEADER),
                    mock.call('POST', 'someuri',
                              None, driver.PROVISION_HEADER),
                    mock.call('POST', '/api/workflowTemplate/' +
                              'openstack_l4' +
                              '?name=' + pool['pool']['id'],
                              mock.ANY,
                              driver.TEMPLATE_HEADER),
                    mock.call('POST', '/api/workflowTemplate/' +
                              'openstack_l2_l3' +
                              '?name=l2_l3_' + subnet['subnet']['network_id'],
                              mock.ANY,
                              driver.TEMPLATE_HEADER),
                    mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
                              '/action/BaseCreate',
                              mock.ANY, driver.TEMPLATE_HEADER),
                    mock.call('GET', '/api/workflow/' +
                              pool['pool']['id'], None, None)
                ]
                self.driver_rest_call_mock.assert_has_calls(calls,
                                                            any_order=True)

                # Test DB: VIP must end up ACTIVE
                new_vip = self.plugin_instance.get_vip(
                    context.get_admin_context(),
                    vip['id']
                )
                self.assertEqual(new_vip['status'], constants.ACTIVE)

                # Delete VIP
                self.plugin_instance.delete_vip(
                    context.get_admin_context(), vip['id'])

                # Test deletion REST calls
                calls = [
                    mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'],
                              None, None)
                ]
                self.driver_rest_call_mock.assert_has_calls(
                    calls, any_order=True)
    def test_create_vip_2_leg(self):
        """Test creation of a VIP where Alteon VIP and PIP are different."""
        with self.subnet(cidr='10.0.0.0/24') as subnet:
            with self.subnet(cidr='10.0.1.0/24') as pool_sub:
                with self.pool(provider='radware',
                               subnet_id=pool_sub['subnet']['id']) as pool:
                    vip_data = {
                        'name': 'vip1',
                        'subnet_id': subnet['subnet']['id'],
                        'pool_id': pool['pool']['id'],
                        'description': '',
                        'protocol_port': 80,
                        'protocol': 'HTTP',
                        'connection_limit': -1,
                        'admin_state_up': True,
                        'status': constants.PENDING_CREATE,
                        'tenant_id': self._tenant_id,
                        'session_persistence': ''
                    }
                    vip = self.plugin_instance.create_vip(
                        context.get_admin_context(), {'vip': vip_data})
                    # service name encodes both VIP and PIP networks
                    name_suffix = '%s_%s' % (subnet['subnet']['network_id'],
                                             pool_sub['subnet']['network_id'])
                    # Test creation REST calls
                    calls = [
                        mock.call('GET', '/api/workflowTemplate', None, None),
                        mock.call('GET', '/api/service/srv_' + name_suffix,
                                  None, None),
                        mock.call('POST', '/api/service?name=srv_' +
                                  name_suffix + '&tenant=' + vip['tenant_id'],
                                  mock.ANY, driver.CREATE_SERVICE_HEADER),
                        mock.call('POST', 'someuri',
                                  None, driver.PROVISION_HEADER),
                        mock.call('GET', '/api/workflow/l2_l3_' + name_suffix,
                                  None, None),
                        mock.call('POST', '/api/workflowTemplate/' +
                                  'openstack_l2_l3' +
                                  '?name=l2_l3_' + name_suffix,
                                  mock.ANY,
                                  driver.TEMPLATE_HEADER),
                        mock.call('POST', '/api/workflow/l2_l3_' +
                                  name_suffix + '/action/setup_l2_l3',
                                  mock.ANY, driver.TEMPLATE_HEADER),
                        mock.call('GET', '/api/workflow/' +
                                  pool['pool']['id'], None, None),
                        mock.call('POST', '/api/workflowTemplate/' +
                                  'openstack_l4' +
                                  '?name=' + pool['pool']['id'],
                                  mock.ANY,
                                  driver.TEMPLATE_HEADER),
                        mock.call('POST', '/api/workflow/' +
                                  pool['pool']['id'] + '/action/BaseCreate',
                                  mock.ANY, driver.TEMPLATE_HEADER)
                    ]
                    self.driver_rest_call_mock.assert_has_calls(calls)
                    # Test DB: VIP must end up ACTIVE
                    new_vip = self.plugin_instance.get_vip(
                        context.get_admin_context(),
                        vip['id']
                    )
                    self.assertEqual(new_vip['status'], constants.ACTIVE)
                    # Test that PIP neutron port was created
                    pip_port_filter = {
                        'name': ['pip_' + vip['id']],
                    }
                    plugin = manager.NeutronManager.get_plugin()
                    num_ports = plugin.get_ports_count(
                        context.get_admin_context(), filters=pip_port_filter)
                    self.assertTrue(num_ports > 0)
                    # Delete VIP
                    self.plugin_instance.delete_vip(
                        context.get_admin_context(), vip['id'])
                    # Test deletion REST calls
                    calls = [
                        mock.call('DELETE', u'/api/workflow/' +
                                  pool['pool']['id'], None, None)
                    ]
                    self.driver_rest_call_mock.assert_has_calls(calls)
    def test_update_vip(self):
        """Update a VIP; verify the BaseCreate REST call and DB status."""
        with self.subnet() as subnet:
            with self.pool(provider='radware',
                           no_delete=True,
                           subnet_id=subnet['subnet']['id']) as pool:
                vip_data = {
                    'name': 'vip1',
                    'subnet_id': subnet['subnet']['id'],
                    'pool_id': pool['pool']['id'],
                    'description': '',
                    'protocol_port': 80,
                    'protocol': 'HTTP',
                    'connection_limit': -1,
                    'admin_state_up': True,
                    'status': constants.PENDING_CREATE,
                    'tenant_id': self._tenant_id,
                    'session_persistence': ''
                }

                vip = self.plugin_instance.create_vip(
                    context.get_admin_context(), {'vip': vip_data})

                vip_data['status'] = constants.PENDING_UPDATE
                self.plugin_instance.update_vip(
                    context.get_admin_context(),
                    vip['id'], {'vip': vip_data})

                # Test REST calls: update re-runs the BaseCreate workflow
                calls = [
                    mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
                              '/action/BaseCreate',
                              mock.ANY, driver.TEMPLATE_HEADER),
                ]
                self.driver_rest_call_mock.assert_has_calls(
                    calls, any_order=True)

                updated_vip = self.plugin_instance.get_vip(
                    context.get_admin_context(), vip['id'])
                self.assertEqual(updated_vip['status'], constants.ACTIVE)

                # delete VIP
                self.plugin_instance.delete_vip(
                    context.get_admin_context(), vip['id'])
    def test_update_vip_2_leg(self):
        """Test update of a VIP where Alteon VIP and PIP are different."""
        with self.subnet(cidr='10.0.0.0/24') as subnet:
            with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
                with self.pool(provider='radware',
                               subnet_id=pool_subnet['subnet']['id']) as pool:
                    vip_data = {
                        'name': 'vip1',
                        'subnet_id': subnet['subnet']['id'],
                        'pool_id': pool['pool']['id'],
                        'description': '',
                        'protocol_port': 80,
                        'protocol': 'HTTP',
                        'connection_limit': -1,
                        'admin_state_up': True,
                        'status': constants.PENDING_CREATE,
                        'tenant_id': self._tenant_id,
                        'session_persistence': ''
                    }
                    vip = self.plugin_instance.create_vip(
                        context.get_admin_context(), {'vip': vip_data})

                    self.plugin_instance.update_vip(
                        context.get_admin_context(),
                        vip['id'], {'vip': vip_data})

                    # Test REST calls: update re-runs the BaseCreate workflow
                    calls = [
                        mock.call('POST', '/api/workflow/' +
                                  pool['pool']['id'] + '/action/BaseCreate',
                                  mock.ANY, driver.TEMPLATE_HEADER),
                    ]
                    self.driver_rest_call_mock.assert_has_calls(calls)

                    updated_vip = self.plugin_instance.get_vip(
                        context.get_admin_context(), vip['id'])
                    self.assertEqual(updated_vip['status'], constants.ACTIVE)

                    # delete VIP
                    self.plugin_instance.delete_vip(
                        context.get_admin_context(), vip['id'])
    def test_delete_vip_failure(self):
        """A failing backend call on VIP delete leaves only the VIP in ERROR.

        Pool, members and health monitor association must stay ACTIVE.
        """
        plugin = self.plugin_instance
        # do_delete=False / no_delete=True: the fixtures skip their own
        # cleanup because this test drives the deletion (and failure) itself.
        with self.network(do_delete=False) as network:
            with self.subnet(network=network, do_delete=False) as subnet:
                with self.pool(no_delete=True,
                               provider='radware',
                               subnet_id=subnet['subnet']['id']) as pool:
                    with contextlib.nested(
                        self.member(pool_id=pool['pool']['id'],
                                    no_delete=True),
                        self.member(pool_id=pool['pool']['id'],
                                    address='192.168.1.101',
                                    no_delete=True),
                        self.health_monitor(no_delete=True),
                        self.vip(pool=pool, subnet=subnet, no_delete=True)
                    ) as (mem1, mem2, hm, vip):
                        plugin.create_pool_health_monitor(
                            context.get_admin_context(), hm, pool['pool']['id']
                        )
                        # Make the mocked REST backend fail from now on.
                        rest_call_function_mock.__dict__.update(
                            {'RESPOND_WITH_ERROR': True})
                        plugin.delete_vip(
                            context.get_admin_context(), vip['vip']['id'])
                        u_vip = plugin.get_vip(
                            context.get_admin_context(), vip['vip']['id'])
                        u_pool = plugin.get_pool(
                            context.get_admin_context(), pool['pool']['id'])
                        u_mem1 = plugin.get_member(
                            context.get_admin_context(), mem1['member']['id'])
                        u_mem2 = plugin.get_member(
                            context.get_admin_context(), mem2['member']['id'])
                        u_phm = plugin.get_pool_health_monitor(
                            context.get_admin_context(),
                            hm['health_monitor']['id'], pool['pool']['id'])
                        # Only the VIP reflects the backend failure.
                        self.assertEqual(u_vip['status'], constants.ERROR)
                        self.assertEqual(u_pool['status'], constants.ACTIVE)
                        self.assertEqual(u_mem1['status'], constants.ACTIVE)
                        self.assertEqual(u_mem2['status'], constants.ACTIVE)
                        self.assertEqual(u_phm['status'], constants.ACTIVE)
    def test_delete_vip(self):
        """Deleting a VIP removes the backend workflow and the VIP record."""
        with self.subnet() as subnet:
            with self.pool(provider='radware',
                           no_delete=True,
                           subnet_id=subnet['subnet']['id']) as pool:
                vip_data = {
                    'name': 'vip1',
                    'subnet_id': subnet['subnet']['id'],
                    'pool_id': pool['pool']['id'],
                    'description': '',
                    'protocol_port': 80,
                    'protocol': 'HTTP',
                    'connection_limit': -1,
                    'admin_state_up': True,
                    'status': constants.PENDING_CREATE,
                    'tenant_id': self._tenant_id,
                    'session_persistence': ''
                }
                vip = self.plugin_instance.create_vip(
                    context.get_admin_context(), {'vip': vip_data})
                self.plugin_instance.delete_vip(
                    context.get_admin_context(), vip['id'])
                # The driver deletes the whole workflow for the pool.
                calls = [
                    mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
                              None, None)
                ]
                self.driver_rest_call_mock.assert_has_calls(
                    calls, any_order=True)
                # The VIP must be gone from the database.
                self.assertRaises(loadbalancer.VipNotFound,
                                  self.plugin_instance.get_vip,
                                  context.get_admin_context(), vip['id'])
    def test_delete_vip_2_leg(self):
        """Test deletion of a VIP where Alteon VIP and PIP are different."""
        self.driver_rest_call_mock.reset_mock()
        # VIP subnet and pool subnet differ, so a PIP port is involved.
        with self.subnet(cidr='10.0.0.0/24') as subnet:
            with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
                with self.pool(provider='radware',
                               no_delete=True,
                               subnet_id=pool_subnet['subnet']['id']) as pool:
                    vip_data = {
                        'name': 'vip1',
                        'subnet_id': subnet['subnet']['id'],
                        'pool_id': pool['pool']['id'],
                        'description': '',
                        'protocol_port': 80,
                        'protocol': 'HTTP',
                        'connection_limit': -1,
                        'admin_state_up': True,
                        'status': constants.PENDING_CREATE,
                        'tenant_id': self._tenant_id,
                        'session_persistence': ''
                    }
                    vip = self.plugin_instance.create_vip(
                        context.get_admin_context(), {'vip': vip_data})
                    self.plugin_instance.delete_vip(
                        context.get_admin_context(), vip['id'])
                    calls = [
                        mock.call('DELETE', '/api/workflow/' +
                                  pool['pool']['id'], None, None)
                    ]
                    self.driver_rest_call_mock.assert_has_calls(calls)
                    # Test that PIP neutron port was deleted
                    pip_port_filter = {
                        'name': ['pip_' + vip['id']],
                    }
                    plugin = manager.NeutronManager.get_plugin()
                    num_ports = plugin.get_ports_count(
                        context.get_admin_context(), filters=pip_port_filter)
                    self.assertTrue(num_ports == 0)
                    self.assertRaises(loadbalancer.VipNotFound,
                                      self.plugin_instance.get_vip,
                                      context.get_admin_context(), vip['id'])
def test_update_pool(self):
with self.subnet():
with self.pool() as pool:
del pool['pool']['provider']
del pool['pool']['status']
self.plugin_instance.update_pool(
context.get_admin_context(),
pool['pool']['id'], pool)
pool_db = self.plugin_instance.get_pool(
context.get_admin_context(), pool['pool']['id'])
self.assertEqual(pool_db['status'], constants.PENDING_UPDATE)
def test_delete_pool_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
no_delete=True,
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.assertRaises(loadbalancer.PoolInUse,
self.plugin_instance.delete_pool,
context.get_admin_context(),
pool['pool']['id'])
def test_create_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.vip(pool=p, subnet=subnet):
with self.member(pool_id=p['pool']['id']):
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
def test_update_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.member(pool_id=p['pool']['id']) as member:
with self.vip(pool=p, subnet=subnet):
self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
updated_member = self.plugin_instance.get_member(
context.get_admin_context(),
member['member']['id']
)
updated_member = self.plugin_instance.get_member(
context.get_admin_context(),
member['member']['id']
)
self.assertEqual(updated_member['status'],
constants.ACTIVE)
def test_update_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as pool:
with self.member(pool_id=pool['pool']['id']) as member:
member['member']['status'] = constants.PENDING_UPDATE
updated_member = self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
self.assertEqual(updated_member['status'],
constants.PENDING_UPDATE)
    def test_delete_member_with_vip(self):
        """Deleting the only member posts a graph with an empty member array."""
        with self.subnet() as subnet:
            with self.pool(provider='radware',
                           subnet_id=subnet['subnet']['id']) as p:
                with self.member(pool_id=p['pool']['id'],
                                 no_delete=True) as m:
                    with self.vip(pool=p, subnet=subnet):
                        # Reset mock and
                        # wait for being sure the member
                        # Changed status from PENDING-CREATE
                        # to ACTIVE
                        self.plugin_instance.delete_member(
                            context.get_admin_context(),
                            m['member']['id']
                        )
                        # Inspect the second-to-last REST call: the graph
                        # posted for deletion must have no member addresses.
                        name, args, kwargs = (
                            self.driver_rest_call_mock.mock_calls[-2]
                        )
                        deletion_post_graph = str(args[2])
                        self.assertTrue(re.search(
                            r'.*\'member_address_array\': \[\].*',
                            deletion_post_graph
                        ))
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.driver_rest_call_mock.assert_has_calls(
                            calls, any_order=True)
                        # The member record must be gone from the database.
                        self.assertRaises(loadbalancer.MemberNotFound,
                                          self.plugin_instance.get_member,
                                          context.get_admin_context(),
                                          m['member']['id'])
def test_delete_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as p:
with self.member(pool_id=p['pool']['id'], no_delete=True) as m:
self.plugin_instance.delete_member(
context.get_admin_context(), m['member']['id']
)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
context.get_admin_context(),
m['member']['id'])
    def test_create_hm_with_vip(self):
        """Associating a health monitor re-runs BaseCreate; HM goes ACTIVE."""
        with self.subnet() as subnet:
            with self.health_monitor() as hm:
                with self.pool(provider='radware',
                               subnet_id=subnet['subnet']['id']) as pool:
                    with self.vip(pool=pool, subnet=subnet):
                        self.plugin_instance.create_pool_health_monitor(
                            context.get_admin_context(),
                            hm, pool['pool']['id']
                        )
                        # Test REST calls
                        # One workflow run for the VIP, one for the monitor.
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + pool['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            ),
                            mock.call(
                                'POST', '/api/workflow/' + pool['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.driver_rest_call_mock.assert_has_calls(
                            calls, any_order=True)
                        phm = self.plugin_instance.get_pool_health_monitor(
                            context.get_admin_context(),
                            hm['health_monitor']['id'], pool['pool']['id']
                        )
                        self.assertEqual(phm['status'], constants.ACTIVE)
    def test_delete_pool_hm_with_vip(self):
        """Removing the HM association posts a graph with an empty hm array."""
        with self.subnet() as subnet:
            with self.health_monitor(no_delete=True) as hm:
                with self.pool(provider='radware',
                               subnet_id=subnet['subnet']['id']) as pool:
                    with self.vip(pool=pool, subnet=subnet):
                        self.plugin_instance.create_pool_health_monitor(
                            context.get_admin_context(),
                            hm, pool['pool']['id']
                        )
                        self.plugin_instance.delete_pool_health_monitor(
                            context.get_admin_context(),
                            hm['health_monitor']['id'],
                            pool['pool']['id']
                        )
                        # Inspect the second-to-last REST call: the posted
                        # graph must no longer reference any health monitor.
                        name, args, kwargs = (
                            self.driver_rest_call_mock.mock_calls[-2]
                        )
                        deletion_post_graph = str(args[2])
                        self.assertTrue(re.search(
                            r'.*\'hm_uuid_array\': \[\].*',
                            deletion_post_graph
                        ))
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + pool['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.driver_rest_call_mock.assert_has_calls(
                            calls, any_order=True)
                        # The association record must be gone.
                        self.assertRaises(
                            loadbalancer.PoolMonitorAssociationNotFound,
                            self.plugin_instance.get_pool_health_monitor,
                            context.get_admin_context(),
                            hm['health_monitor']['id'],
                            pool['pool']['id']
                        )
| |
from tqdm import tqdm
import json
import numpy as np
from numpy import pi
import pandas as pd
import scipy.sparse.linalg
from scipy import sparse, stats, spatial
import networkx as nx
from networkx.algorithms import community
import matplotlib.pyplot as plt
import seaborn as sns
import ephem
import reverse_geocoder as rg
from collections import Counter
from multiprocessing import Pool
from pygsp import graphs, filters, plotting
import random
def data_reduction_per_percentile(satcat_df,
                                  TARGET_PERCENTILE_LAUNCH_SITES=90,
                                  TARGET_PERCENTILE_SOURCES=90):
    """Reduce ``satcat_df`` by dropping rows with rare sources / launch sites.

    The thresholds are the given percentiles of the per-site and per-source
    launch counts; any source or launch site with fewer launches than its
    threshold is removed.

    BUG FIX: sources were previously compared against the *launch-site*
    threshold (``min_launch_nbr``); they are now compared against
    ``min_source_nbr`` as intended.  Also replaced the removed
    ``Series.iteritems()`` with ``Series.items()``.
    """
    launches_per_site = satcat_df.launch_site.value_counts()
    launches_per_source = satcat_df.source.value_counts()
    min_launch_nbr = np.percentile(launches_per_site,
                                   TARGET_PERCENTILE_LAUNCH_SITES)
    min_source_nbr = np.percentile(launches_per_source,
                                   TARGET_PERCENTILE_SOURCES)
    print("Min launch number: {}".format(min_launch_nbr))
    print("Min source number: {}".format(min_source_nbr))
    reduced_satcat_df = satcat_df
    for source, nbr in launches_per_source.items():
        if nbr < min_source_nbr:
            reduced_satcat_df = reduced_satcat_df.loc[
                ~(reduced_satcat_df.source == source)]
    print("Unique sources: Initial: {}, Final: {}".format(len(satcat_df.source.unique()),len(reduced_satcat_df.source.unique())))
    for site, nbr in launches_per_site.items():
        if nbr < min_launch_nbr:
            reduced_satcat_df = reduced_satcat_df.loc[
                ~(reduced_satcat_df.launch_site == site)]
    print("Unique launch sites: Initial: {}, Final: {}".format(len(satcat_df.launch_site.unique()),len(reduced_satcat_df.launch_site.unique())))
    return reduced_satcat_df
def data_reduction_per_launch_site(satcat_df, LAUNCH_SITES=("AFETR", "AFWTR")):
    """Keep only the rows whose launch site is listed in ``LAUNCH_SITES``.

    Rows are returned grouped by site, in ``LAUNCH_SITES`` order, matching
    the original behavior.

    Fixes: ``DataFrame.append`` (removed in pandas 2.0) is no longer used,
    and the mutable list default became an immutable tuple.
    """
    selected_index = []
    for site in LAUNCH_SITES:
        selected_index.extend(satcat_df.index[satcat_df.launch_site == site])
    return satcat_df.loc[selected_index]
def normalize_features(features_df):
    """Standardize every column and drop those left with null values.

    NOTE: the augmented assignments modify ``features_df`` in place, so
    the caller's dataframe is mutated (same as the original behavior).
    Columns with zero variance become NaN and are dropped.
    """
    features_df -= features_df.mean(axis=0)
    features_df /= features_df.std(axis=0)
    null_columns = [name for name in features_df.columns
                    if features_df[name].isnull().any()]
    return features_df.drop(null_columns, axis=1)
def plot_weight_hist(weights, axes, name=None):
    """Visualize a weight matrix: sparsity pattern plus a histogram.

    axes[0] gets the sparsity plot, axes[1] a 50-bin histogram of the
    strictly positive weights. When *name* is given the figure is also
    saved to fig/<name>.png.
    """
    axes[0].spy(weights)
    positive_weights = weights[weights > 0].reshape(-1)
    axes[1].hist(positive_weights, bins=50)
    if name:
        plt.savefig("fig/{}.png".format(name))
def check_symmetric(a, tol=1e-10):
    """Return True when matrix *a* equals its transpose within *tol*.

    Adapted from: https://stackoverflow.com/questions/42908334/checking-if-a-matrix-is-symmetric-in-numpy
    """
    return np.allclose(a, np.transpose(a), atol=tol)
def get_weights_from_distance(distances, kernel_width_percentile=0.5):
    """Turn a (square) distance matrix into Gaussian-kernel weights.

    The kernel width is the given percentile of all distance values,
    which allows more latitude than the mean and is easy to adapt.
    Diagonal (self-loop) weights are set to zero.
    """
    kernel_width = np.percentile(distances, kernel_width_percentile)
    weights = np.exp((-np.square(distances)) / kernel_width ** 2)
    # Vectorized diagonal reset instead of the original Python loop.
    np.fill_diagonal(weights, 0)
    return weights
def sparse_weights(weights: np.ndarray, neighbors: int = 100, epsilon: float = 1e-8) -> np.ndarray:
    """Function to sparsify the weight matrix
    It will set to zero the weights that are not in "neighbors"
    It will set to zero the weights that are smaller than "epsilon"
    It will ensure that the resulting matrix is symmetric
    """
    # Per row, indexes of the weights sorted ascending: the last
    # `neighbors` entries of each row are the largest weights.
    sorted_weights_indexes = np.argsort(weights, axis=1)
    # Set to zero the weights that are not in the NEIGHBORS
    for line in tqdm(range(len(sorted_weights_indexes))):
        for index in range(len(weights[0])-neighbors):
            weights[line][sorted_weights_indexes[line][index]] = 0.0
    # Filter weights that are too small
    # If we keep them ,we would have an highly connected graph
    # which is not what we want for the next sections
    # Only j < i is visited, but both (i, j) and (j, i) are zeroed,
    # so every off-diagonal entry is covered; the diagonal is skipped.
    for i in tqdm(range(len(weights))):
        for j in range(i):
            if weights[i][j] < epsilon:
                weights[i][j] = 0
            if weights[j][i] < epsilon:
                weights[j][i] = 0
    # Ensure symmetry
    for i in tqdm(range(len(weights))):
        # We need to check only the bottom triangle, because we do two checks
        # This reduces significantly the loop time
        for j in range(i):
            # Copy the non-zero value over a zero counterpart; if both are
            # non-zero but different, they are intentionally left as-is.
            if weights[i][j] == 0 and weights[j][i] != 0:
                weights[i][j] = weights[j][i]
            if weights[i][j] != 0 and weights[j][i] == 0:
                weights[j][i] = weights[i][j]
    return weights
def get_weights_from_feature_dataframe(feature_df, kernel_width_percentile=0.5, neighbors=100, epsilon=1e-8):
    """Build a sparsified weight matrix straight from a feature dataframe.

    Pairwise Bray-Curtis distances are turned into Gaussian weights and
    then sparsified (k nearest neighbors, epsilon cutoff, symmetrized).
    """
    pairwise = spatial.distance.pdist(feature_df, metric="braycurtis")
    distances = spatial.distance.squareform(pairwise)
    dense_weights = get_weights_from_distance(distances,
                                              kernel_width_percentile)
    return sparse_weights(dense_weights, neighbors, epsilon)
def create_graph_from_weights(weights, satcat_df):
    """Create a graph from the weights
    Each node will have the norad value as an argument
    """
    graph = nx.Graph(weights)
    # Node i corresponds to row i of satcat_df; its index value is the
    # NORAD identifier.
    norad_values = list(satcat_df.index)
    for node in graph.nodes():
        # NOTE(review): Graph.node was removed in networkx 2.4; this module
        # targets an older networkx (it also uses
        # nx.connected_component_subgraphs, removed in 2.4).
        graph.node[node]['NORAD'] = norad_values[node]
    return graph
def get_nodes_per_site(graph, satcat_df):
    """Get the label of each node per launch site
    This assumes that there's at least two launch sites
    """
    # One (possibly empty) bucket per numeric launch-site id.
    nodes_per_site = {site:[] for site in satcat_df.num_launch_site.unique()}
    for node in graph.nodes():
        # Map the graph node back to its satcat row via the NORAD id.
        norad = graph.node[node]['NORAD']
        launch_site = satcat_df.loc[norad, "num_launch_site"]
        nodes_per_site[launch_site].append(node)
    return nodes_per_site
def draw_graph(graph, axes, satcat_df, name=None):
    """Draw the graph on *axes*, coloring nodes by launch site.

    When *name* is given the figure is also saved to fig/<name>.png.
    """
    nodes_per_site = get_nodes_per_site(graph, satcat_df)
    layout = nx.spring_layout(graph)
    # Draw each launch-site group with its own color from the Set1 map.
    for key in list(nodes_per_site.keys()):
        nx.draw_networkx_nodes(graph,
                               layout,
                               nodes_per_site[key],
                               node_color=plt.get_cmap('Set1')(key),
                               node_size=10,
                               ax=axes
                               )
    nx.draw_networkx_edges(graph,layout, width=0.1, ax=axes)
    if name:
        plt.savefig("fig/{}.png".format(name))
def remove_lonely_nodes(graph, minimum_degree=0):
    """Remove every node whose degree is <= *minimum_degree* (in place).

    Returns the same graph object for convenience.
    """
    doomed = [node for node in tqdm(graph.nodes())
              if graph.degree(node) <= minimum_degree]
    graph.remove_nodes_from(doomed)
    return graph
def get_nodes_nbr(graphs):
    """Return a list with the node count of each graph in *graphs*."""
    counts = []
    for current_graph in graphs:
        counts.append(len(current_graph.nodes()))
    return counts
def print_subgraphs_nodes_dist(subgraphs, axes, name=None):
    """Plot the distribution of subgraph sizes (node counts) on *axes*.

    When *name* is given the figure is also saved to fig/<name>.png.
    """
    nodes_nbr = get_nodes_nbr(subgraphs)
    sns.distplot(nodes_nbr,kde=False, rug=True, ax=axes);
    if name:
        plt.savefig("fig/{}.png".format(name))
def print_subgraphs_network(subgraphs, satcat_df, name=None):
    """Use previously defined draw_graph function to draw
    an array of subgraphs on a 2-column grid.

    When *name* is given the figure is also saved to fig/<name>.png.
    """
    nbr_of_graphs = len(subgraphs)
    # Two plots per row, rounding up when the count is odd.
    if nbr_of_graphs % 2 == 1:
        nbr_of_subplots = nbr_of_graphs // 2 + 1
    else:
        nbr_of_subplots = nbr_of_graphs // 2
    # BUG FIX: squeeze=False keeps `axes` two-dimensional even for a
    # single row, so the axes[row][col] indexing below no longer fails
    # when there are only one or two subgraphs.
    fig, axes = plt.subplots(nbr_of_subplots, 2, squeeze=False,
                             figsize=(20, nbr_of_graphs * 1.5))
    for graph_index, graph in enumerate(subgraphs):
        draw_graph(graph, axes[graph_index // 2][graph_index % 2], satcat_df)
    if name:
        plt.savefig("fig/{}.png".format(name))
def get_big_subgraphs_index(subgraphs, max_nodes):
    """Return the indexes of the subgraphs with more than *max_nodes* nodes."""
    return [position for position, graph in enumerate(subgraphs)
            if len(graph.nodes()) > max_nodes]
def get_graph_cliques(graph, smallest_clique=10):
    """Determine the k-clique communities of *graph* and return them as
    a list of subgraphs (one per community).

    Fix: the original copied the community list element-by-element
    (`[clique for clique in c]`) for no effect; the copy is removed.
    """
    communities = list(community.k_clique_communities(graph,
                                                      k=smallest_clique))
    return [graph.subgraph(node_set) for node_set in communities]
def create_unlabeled_label_df(satcat_df):
    """Build the initial, fully unlabeled label dataframe.

    Every node starts with label -1 ("unknown") and is_labeled 0; the
    true label (num_launch_site) is kept in real_label. The frame is
    indexed by NORAD id.
    """
    count = len(satcat_df)
    frame = pd.DataFrame({
        "label": np.full(count, -1),
        "is_labeled": np.full(count, 0),
        "NORAD": list(satcat_df.index),
        "real_label": satcat_df.num_launch_site.values,
    })
    return frame.set_index("NORAD")
def label_nodes(label_df, percent_of_labeled):
    """Randomly reveal the true label of *percent_of_labeled*% of the nodes.

    Mutates and returns *label_df*: chosen rows get is_labeled=1 and
    their label copied from real_label.
    """
    target_count = int(len(label_df) * (percent_of_labeled * 0.01))
    chosen = random.sample(list(label_df.index), k=target_count)
    for norad in chosen:
        label_df.loc[norad, "is_labeled"] = 1
        label_df.loc[norad, "label"] = label_df.loc[norad, "real_label"]
    return label_df
def create_labeled_df(satcat_df, percent_of_labeled):
    """Create a label dataframe with a random fraction already labeled."""
    return label_nodes(create_unlabeled_label_df(satcat_df),
                       percent_of_labeled)
def get_label_probs(label_df, subgraph):
    """Get the probability of each label for a specific subgraph"""
    # Select the label rows belonging to this subgraph (by NORAD id).
    # NOTE(review): subgraph.node was removed in networkx 2.4.
    subgraph_df = label_df.loc[[subgraph.node[node]['NORAD'] for node in subgraph.nodes()]]
    # Only already-labeled nodes contribute to the estimate.
    labeled_subgraph_df = subgraph_df[subgraph_df.is_labeled == 1]
    values_per_label = labeled_subgraph_df.real_label.value_counts()
    probability_dict = {}
    # Relative frequency of each label among the labeled nodes.
    # NOTE(review): Series.iteritems() was removed in pandas 2.0.
    for label, count in values_per_label.iteritems():
        probability_dict[label] = count/len(labeled_subgraph_df)
    return probability_dict
def set_subgraph_label(label_df, subgraph, label):
    """Use to set all the labels of a subgraph to the same value"""
    for node in subgraph.nodes():
        norad = subgraph.node[node]["NORAD"]
        # Nodes that were already labeled (e.g. the seeded ones) keep
        # their label; only unlabeled nodes are assigned.
        if label_df.loc[norad, "is_labeled"] == 0:
            label_df.loc[norad, "label"] = label
            label_df.loc[norad, "is_labeled"] = 1
    return label_df
def get_subgraphs_from_weights(weights, reduced_satcat_df, MAXIMUM_SUBGRAPH_NODES_PERCENT, SIZE_OF_SMALLEST_CLIQUE):
    """get the subgraphs using weights and reduced_satcat_df values

    Builds the graph, removes isolated nodes, splits it into connected
    components, and breaks the components that exceed
    MAXIMUM_SUBGRAPH_NODES_PERCENT of all nodes into k-clique communities.
    """
    graph = create_graph_from_weights(weights, reduced_satcat_df)
    remove_lonely_nodes(graph)
    # Get subgraphs
    # NOTE(review): nx.connected_component_subgraphs was removed in
    # networkx 2.4; this module targets an older networkx.
    connected_subgraphs = []
    for subgraph in nx.connected_component_subgraphs(graph):
        connected_subgraphs.append(nx.Graph(subgraph))
    maximum_subgraph_nodes = len(graph.nodes())*MAXIMUM_SUBGRAPH_NODES_PERCENT
    # Get the index of the big subgraphs
    big_subgraphs_index = get_big_subgraphs_index(connected_subgraphs,
                                                  maximum_subgraph_nodes
                                                  )
    # Segment the big subgraphs into cliques
    clique_subgraphs = []
    for subgraph_index in big_subgraphs_index:
        current_subgraph = connected_subgraphs[subgraph_index]
        current_subgraphs = get_graph_cliques(current_subgraph,
                                              smallest_clique=SIZE_OF_SMALLEST_CLIQUE
                                              )
        clique_subgraphs += current_subgraphs
    # Keep the small components as-is and add the clique pieces.
    connected_subgraphs_no_big = [subgraph for index, subgraph in enumerate(connected_subgraphs)
                                  if not index in big_subgraphs_index]
    subgraphs = clique_subgraphs + connected_subgraphs_no_big
    return subgraphs
def identify_from_prob(label_df, subgraph, probability_dict):
    """Assign the most probable label to every unlabeled node of *subgraph*."""
    if probability_dict:
        # NOTE: if there's equal probability, it will take one of them,
        # which one isn't known.
        best_label = max(probability_dict, key=probability_dict.get)
    else:
        best_label = -1  # no labeled node in the subgraph: unknown
    return set_subgraph_label(label_df, subgraph, best_label)
def identify_nodes_from_prob(label_df, subgraphs):
    """Run the probability-based labeling over every graph in *subgraphs*."""
    for current_subgraph in subgraphs:
        label_probs = get_label_probs(label_df, current_subgraph)
        label_df = identify_from_prob(label_df, current_subgraph, label_probs)
    return label_df
def get_labeled_df(satcat_df, subgraphs, percent_labeled):
    """Seed a fraction of labels, propagate them, keep labeled rows only."""
    seeded_df = create_labeled_df(satcat_df, percent_labeled)
    propagated_df = identify_nodes_from_prob(seeded_df, subgraphs)
    # Rows that could not be labeled at all are dropped.
    return propagated_df[propagated_df.is_labeled == 1]
def get_error_properties(label_df, sat_df, show=True):
    """Compute labeling-quality statistics from the label dataframe.

    Returns a dict with total counts (sats, labels, good/bad/unknown
    labels) and the fraction of good labels. Prints a summary line when
    *show* is true.
    """
    matches = label_df.label == label_df.real_label
    total_sat = len(sat_df)
    total_label = len(matches)
    total_good_label = matches.sum()
    total_bad_label = total_label - total_good_label
    # Label -1 means the propagation could not decide.
    total_unknown_label = len(label_df.loc[label_df.label == -1])
    good_classification_percent = total_good_label/total_label
    if show:
        print("Total Sats: {}, Labels:{}, Good:{}, Bad: {}, Unknown: {}, Percent: {}".format(
            total_sat,
            total_label,
            total_good_label,
            total_bad_label,
            total_unknown_label,
            good_classification_percent))
    return {"total_sat" :total_sat,
            "total_label" : total_label,
            "total_good_label" : total_good_label,
            "total_bad_label" : total_bad_label,
            "total_unknown_label" : total_unknown_label,
            "good_classification_percent" : good_classification_percent
            }
def print_error_graph(error, file_name=None):
    """Function to print the errors in a graph

    *error* maps a percent-labeled value to the dict returned by
    get_error_properties. Percentages go on the left axis (solid lines),
    absolute node counts on a log-scaled right axis (dotted lines).
    """
    x=list(error.keys())
    # Percentages for the left axis.
    y1=[error[key]["good_classification_percent"]*100 for key in error.keys()]
    y6=[(1-error[key]["good_classification_percent"])*100 for key in error.keys()]
    # Absolute counts for the right axis.
    y2=[error[key]["total_unknown_label"] for key in error.keys()]
    y3=[error[key]["total_good_label"] for key in error.keys()]
    y4=[error[key]["total_bad_label"] for key in error.keys()]
    y5=[error[key]["total_label"] for key in error.keys()]
    fig, ax1 = plt.subplots(figsize=(10,10))
    from matplotlib.ticker import AutoMinorLocator
    minorLocator = AutoMinorLocator()
    ax1.xaxis.set_minor_locator(minorLocator)
    ax1.yaxis.set_minor_locator(minorLocator)
    ax1.grid(b=True, which='major', linestyle='-')
    ax1.grid(b=True, which='minor', linestyle='--')
    l1, = ax1.plot(x, y1, 'b', label="Good labels")
    l6, = ax1.plot(x, y6, 'm', label="Errors (bad label or unknown)")
    legend1 = plt.legend([l1, l6], ["Good labels", "Errors (bad label or unknown)"], loc=1)
    ax1.set_ylabel("Good labelling proportion (%)")
    ax1.set_xlabel("Labelized Nodes (%)")
    # Second y-axis (log scale) for the raw node counts.
    ax2 = ax1.twinx()
    ax2.set_ylabel('Number of Nodes')
    ax2.set_yscale("log")
    l2, = ax2.plot(x, y2, 'c--', label="Total Unknown Label")
    l3, = ax2.plot(x, y3, 'g--', label="Total Good Label")
    l4, = ax2.plot(x, y4, 'r--', label="Total Bad Label")
    l5, = ax2.plot(x, y5, 'k--', label="Total Label")
    plt.title("Result analysis\nleft axis is for full lines, right for dotted lines")
    plt.legend([l2, l3, l4, l5], ["Total Unknown Label", "Total Good Label", "Total Bad Label", "Total Label"], loc=3)
    if file_name:
        plt.savefig('fig/{}.png'.format(file_name))
    plt.show()
def calculate_all_values(satcat_df,
                         get_feature_dataframe,
                         REDUCE_PER_PERCENTILE=False,
                         REDUCE_PER_LAUNCH_SITE=True,
                         TARGET_PERCENTILE_LAUNCH_SITES=90,
                         TARGET_PERCENTILE_SOURCES=90,
                         LAUNCH_SITES=["AFETR", "AFWTR"],
                         ONLY_PAYLOAD=True,
                         ONLY_OPERATIONAL=False,
                         SIZE_OF_SMALLEST_CLIQUE=20,
                         ):
    """Function to get the reduced_satcat_df and the subgraphs from parameters.

    Returns a dict with keys "reduced_satcat_df" and "subgraphs".

    BUG FIX: a local assignment ``SIZE_OF_SMALLEST_CLIQUE = 20`` used to
    overwrite the caller-supplied parameter, making it dead; it has been
    removed.  Also, when both REDUCE flags are False the full dataframe
    is now used instead of raising UnboundLocalError.
    """
    # WEIGHTS PARAMETERS
    KERNEL_WIDTH_PERCENTILE = 0.5
    NEIGHBORS = 100
    EPSILON = 1e-8
    # GRAPH PARAMETERS
    MAXIMUM_SUBGRAPH_NODES_PERCENT = 0.20
    # Get reduced dataframe
    if REDUCE_PER_PERCENTILE:
        reduced_satcat_df = data_reduction_per_percentile(satcat_df, TARGET_PERCENTILE_LAUNCH_SITES, TARGET_PERCENTILE_SOURCES)
    elif REDUCE_PER_LAUNCH_SITE:
        reduced_satcat_df = data_reduction_per_launch_site(satcat_df, LAUNCH_SITES)
    else:
        reduced_satcat_df = satcat_df
    if ONLY_PAYLOAD:
        reduced_satcat_df = reduced_satcat_df.loc[reduced_satcat_df.payload_flag == True]
    if ONLY_OPERATIONAL:
        reduced_satcat_df = reduced_satcat_df.loc[reduced_satcat_df.operational_status == "Operational"]
    print("getting feature vector")
    # Get feature vector
    feature_df = get_feature_dataframe(reduced_satcat_df, ONLY_PAYLOAD, ONLY_OPERATIONAL)
    print("getting weights")
    # Get weight matrix
    weights = get_weights_from_feature_dataframe(feature_df, KERNEL_WIDTH_PERCENTILE, NEIGHBORS, EPSILON)
    # Get subgraphs
    print("getting subgraphs")
    subgraphs = get_subgraphs_from_weights(weights, reduced_satcat_df, MAXIMUM_SUBGRAPH_NODES_PERCENT, SIZE_OF_SMALLEST_CLIQUE)
    # Free the large intermediates before returning.
    del weights
    del feature_df
    result_dict = {"reduced_satcat_df": reduced_satcat_df, "subgraphs": subgraphs}
    return result_dict
def calculate_error(reduced_satcat_df, subgraphs, values=[0, 1, 2, 5, 10, 20, 40, 60, 80, 90, 100]):
    """Compute labeling errors for each "percent labeled" value.

    Returns a dict mapping each percentage to its error-property dict,
    printing the good-classification ratio for every entry.
    """
    error = {}
    for percent_labeled in values:
        labeled_df = get_labeled_df(reduced_satcat_df, subgraphs,
                                    percent_labeled)
        error[percent_labeled] = get_error_properties(
            labeled_df, reduced_satcat_df, show=False)
    for percent_labeled in error.keys():
        print(percent_labeled, ":",
              error[percent_labeled]["good_classification_percent"])
    return error
| |
"""Support for IHC devices."""
import logging
import os.path
from defusedxml import ElementTree
from ihcsdk.ihccontroller import IHCController
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
CONF_ID,
CONF_NAME,
CONF_PASSWORD,
CONF_TYPE,
CONF_UNIT_OF_MEASUREMENT,
CONF_URL,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_CONTROLLER_ID,
ATTR_IHC_ID,
ATTR_VALUE,
CONF_AUTOSETUP,
CONF_BINARY_SENSOR,
CONF_DIMMABLE,
CONF_INFO,
CONF_INVERTING,
CONF_LIGHT,
CONF_NODE,
CONF_NOTE,
CONF_OFF_ID,
CONF_ON_ID,
CONF_POSITION,
CONF_SENSOR,
CONF_SWITCH,
CONF_XPATH,
SERVICE_PULSE,
SERVICE_SET_RUNTIME_VALUE_BOOL,
SERVICE_SET_RUNTIME_VALUE_FLOAT,
SERVICE_SET_RUNTIME_VALUE_INT,
)
from .util import async_pulse
_LOGGER = logging.getLogger(__name__)
# Optional user-provided file that overrides the bundled auto-setup rules.
AUTO_SETUP_YAML = "ihc_auto_setup.yaml"
DOMAIN = "ihc"
# Keys of the per-controller dict stored in hass.data.
IHC_CONTROLLER = "controller"
IHC_INFO = "info"
# Entity platforms this integration can set up.
PLATFORMS = ("binary_sensor", "light", "sensor", "switch")
def validate_name(config):
    """Validate the device name, deriving one from the IHC id if missing."""
    if CONF_NAME not in config:
        ihcid = config[CONF_ID]
        config[CONF_NAME] = f"ihc_{ihcid}"
    return config
# Fields shared by every manually configured IHC device.
DEVICE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ID): cv.positive_int,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_NOTE): cv.string,
        vol.Optional(CONF_POSITION): cv.string,
    }
)
# Manually configured switch (optional dedicated on/off resource ids).
SWITCH_SCHEMA = DEVICE_SCHEMA.extend(
    {
        vol.Optional(CONF_OFF_ID, default=0): cv.positive_int,
        vol.Optional(CONF_ON_ID, default=0): cv.positive_int,
    }
)
# Manually configured binary sensor.
BINARY_SENSOR_SCHEMA = DEVICE_SCHEMA.extend(
    {
        vol.Optional(CONF_INVERTING, default=False): cv.boolean,
        vol.Optional(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
    }
)
# Manually configured light.
LIGHT_SCHEMA = DEVICE_SCHEMA.extend(
    {
        vol.Optional(CONF_DIMMABLE, default=False): cv.boolean,
        vol.Optional(CONF_OFF_ID, default=0): cv.positive_int,
        vol.Optional(CONF_ON_ID, default=0): cv.positive_int,
    }
)
# Manually configured sensor (defaults to a Celsius temperature).
SENSOR_SCHEMA = DEVICE_SCHEMA.extend(
    {vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS): cv.string}
)
# Configuration for a single IHC controller.
IHC_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_URL): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Optional(CONF_AUTOSETUP, default=True): cv.boolean,
        vol.Optional(CONF_BINARY_SENSOR, default=[]): vol.All(
            cv.ensure_list, [vol.All(BINARY_SENSOR_SCHEMA, validate_name)]
        ),
        vol.Optional(CONF_INFO, default=True): cv.boolean,
        vol.Optional(CONF_LIGHT, default=[]): vol.All(
            cv.ensure_list, [vol.All(LIGHT_SCHEMA, validate_name)]
        ),
        vol.Optional(CONF_SENSOR, default=[]): vol.All(
            cv.ensure_list, [vol.All(SENSOR_SCHEMA, validate_name)]
        ),
        vol.Optional(CONF_SWITCH, default=[]): vol.All(
            cv.ensure_list, [vol.All(SWITCH_SCHEMA, validate_name)]
        ),
    }
)
# The integration accepts a list of controller configurations.
CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.Schema(vol.All(cv.ensure_list, [IHC_SCHEMA]))}, extra=vol.ALLOW_EXTRA
)
# Rules (node + xpath per platform) used for automatic product discovery.
AUTO_SETUP_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_BINARY_SENSOR, default=[]): vol.All(
            cv.ensure_list,
            [
                vol.All(
                    {
                        vol.Required(CONF_NODE): cv.string,
                        vol.Required(CONF_XPATH): cv.string,
                        vol.Optional(CONF_INVERTING, default=False): cv.boolean,
                        vol.Optional(CONF_TYPE): cv.string,
                    }
                )
            ],
        ),
        vol.Optional(CONF_LIGHT, default=[]): vol.All(
            cv.ensure_list,
            [
                vol.All(
                    {
                        vol.Required(CONF_NODE): cv.string,
                        vol.Required(CONF_XPATH): cv.string,
                        vol.Optional(CONF_DIMMABLE, default=False): cv.boolean,
                    }
                )
            ],
        ),
        vol.Optional(CONF_SENSOR, default=[]): vol.All(
            cv.ensure_list,
            [
                vol.All(
                    {
                        vol.Required(CONF_NODE): cv.string,
                        vol.Required(CONF_XPATH): cv.string,
                        vol.Optional(
                            CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS
                        ): cv.string,
                    }
                )
            ],
        ),
        vol.Optional(CONF_SWITCH, default=[]): vol.All(
            cv.ensure_list,
            [
                vol.All(
                    {
                        vol.Required(CONF_NODE): cv.string,
                        vol.Required(CONF_XPATH): cv.string,
                    }
                )
            ],
        ),
    }
)
# Service payload schemas; controller_id selects which controller to target.
SET_RUNTIME_VALUE_BOOL_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_IHC_ID): cv.positive_int,
        vol.Required(ATTR_VALUE): cv.boolean,
        vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
    }
)
SET_RUNTIME_VALUE_INT_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_IHC_ID): cv.positive_int,
        vol.Required(ATTR_VALUE): vol.Coerce(int),
        vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
    }
)
SET_RUNTIME_VALUE_FLOAT_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_IHC_ID): cv.positive_int,
        vol.Required(ATTR_VALUE): vol.Coerce(float),
        vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
    }
)
PULSE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_IHC_ID): cv.positive_int,
        vol.Optional(ATTR_CONTROLLER_ID, default=0): cv.positive_int,
    }
)
def setup(hass, config):
    """Set up the IHC integration."""
    controller_confs = config.get(DOMAIN)
    # Each entry in the config list is a separate IHC controller; stop at
    # the first failure (all() short-circuits like the original loop).
    return all(
        ihc_setup(hass, config, controller_conf, index)
        for index, controller_conf in enumerate(controller_confs)
    )
def ihc_setup(hass, config, conf, controller_id):
    """Set up a single IHC controller from its configuration."""
    ihc_controller = IHCController(
        conf[CONF_URL], conf[CONF_USERNAME], conf[CONF_PASSWORD]
    )
    if not ihc_controller.authenticate():
        _LOGGER.error("Unable to authenticate on IHC controller")
        return False
    if conf[CONF_AUTOSETUP]:
        if not autosetup_ihc_products(
            hass, config, ihc_controller, controller_id
        ):
            return False
    # Manual configuration
    get_manual_configuration(hass, config, conf, ihc_controller, controller_id)
    # Store controller configuration
    hass.data[f"ihc{controller_id}"] = {
        IHC_CONTROLLER: ihc_controller,
        IHC_INFO: conf[CONF_INFO],
    }
    # We only want to register the service functions once for the first controller
    if controller_id == 0:
        setup_service_functions(hass)
    return True
def get_manual_configuration(hass, config, conf, ihc_controller, controller_id):
    """Get manual configuration for IHC devices."""
    for platform in PLATFORMS:
        if platform not in conf:
            continue
        discovery_info = {}
        for device_cfg in conf.get(platform):
            name = device_cfg[CONF_NAME]
            discovery_info[name] = {
                "ihc_id": device_cfg[CONF_ID],
                "ctrl_id": controller_id,
                "product": {
                    "name": name,
                    "note": device_cfg.get(CONF_NOTE) or "",
                    "position": device_cfg.get(CONF_POSITION) or "",
                },
                "product_cfg": {
                    "type": device_cfg.get(CONF_TYPE),
                    "inverting": device_cfg.get(CONF_INVERTING),
                    "off_id": device_cfg.get(CONF_OFF_ID),
                    "on_id": device_cfg.get(CONF_ON_ID),
                    "dimmable": device_cfg.get(CONF_DIMMABLE),
                    "unit_of_measurement": device_cfg.get(
                        CONF_UNIT_OF_MEASUREMENT
                    ),
                },
            }
        if discovery_info:
            discovery.load_platform(hass, platform, DOMAIN, discovery_info, config)
def autosetup_ihc_products(hass: HomeAssistant, config, ihc_controller, controller_id):
    """Auto setup of IHC products from the IHC project file."""
    project_xml = ihc_controller.get_project()
    if not project_xml:
        _LOGGER.error("Unable to read project from IHC controller")
        return False
    project = ElementTree.fromstring(project_xml)

    # A user-supplied auto-setup file in the config dir overrides the one
    # bundled with the integration.
    yaml_path = hass.config.path(AUTO_SETUP_YAML)
    if not os.path.isfile(yaml_path):
        yaml_path = os.path.join(os.path.dirname(__file__), AUTO_SETUP_YAML)
    setup_yaml = load_yaml_config_file(yaml_path)
    try:
        auto_setup_conf = AUTO_SETUP_SCHEMA(setup_yaml)
    except vol.Invalid as exception:
        _LOGGER.error("Invalid IHC auto setup data: %s", exception)
        return False

    groups = project.findall(".//group")
    for platform in PLATFORMS:
        discovery_info = get_discovery_info(
            auto_setup_conf[platform], groups, controller_id
        )
        if discovery_info:
            discovery.load_platform(hass, platform, DOMAIN, discovery_info, config)
    return True
def get_discovery_info(platform_setup, groups, controller_id):
    """Get discovery info for specified IHC platform."""
    discovered = {}
    for group in groups:
        group_name = group.attrib["name"]
        for cfg in platform_setup:
            for product in group.findall(cfg[CONF_XPATH]):
                for node in product.findall(cfg[CONF_NODE]):
                    # Skip nodes that are flagged as settings.
                    if node.attrib.get("setting") == "yes":
                        continue
                    # IHC ids are encoded like "_0x1234"; base-0 int() parses
                    # the 0x prefix after the underscore is stripped.
                    ihc_id = int(node.attrib["id"].strip("_"), 0)
                    discovered[f"{group_name}_{ihc_id}"] = {
                        "ihc_id": ihc_id,
                        "ctrl_id": controller_id,
                        "product": {
                            "name": product.get("name") or "",
                            "note": product.get("note") or "",
                            "position": product.get("position") or "",
                        },
                        "product_cfg": cfg,
                    }
    return discovered
def setup_service_functions(hass: HomeAssistant):
    """Set up the IHC service functions."""

    def _get_controller(call):
        # Resolve the controller selected in the service call data.
        controller_id = call.data[ATTR_CONTROLLER_ID]
        return hass.data[f"ihc{controller_id}"][IHC_CONTROLLER]

    def set_runtime_value_bool(call):
        """Set a IHC runtime bool value service function."""
        controller = _get_controller(call)
        controller.set_runtime_value_bool(
            call.data[ATTR_IHC_ID], call.data[ATTR_VALUE]
        )

    def set_runtime_value_int(call):
        """Set a IHC runtime integer value service function."""
        controller = _get_controller(call)
        controller.set_runtime_value_int(
            call.data[ATTR_IHC_ID], call.data[ATTR_VALUE]
        )

    def set_runtime_value_float(call):
        """Set a IHC runtime float value service function."""
        controller = _get_controller(call)
        controller.set_runtime_value_float(
            call.data[ATTR_IHC_ID], call.data[ATTR_VALUE]
        )

    async def async_pulse_runtime_input(call):
        """Pulse a IHC controller input function."""
        controller = _get_controller(call)
        await async_pulse(hass, controller, call.data[ATTR_IHC_ID])

    hass.services.register(
        DOMAIN,
        SERVICE_SET_RUNTIME_VALUE_BOOL,
        set_runtime_value_bool,
        schema=SET_RUNTIME_VALUE_BOOL_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_SET_RUNTIME_VALUE_INT,
        set_runtime_value_int,
        schema=SET_RUNTIME_VALUE_INT_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_SET_RUNTIME_VALUE_FLOAT,
        set_runtime_value_float,
        schema=SET_RUNTIME_VALUE_FLOAT_SCHEMA,
    )
    hass.services.register(
        DOMAIN, SERVICE_PULSE, async_pulse_runtime_input, schema=PULSE_SCHEMA
    )
| |
# encoding: utf-8
from lkbutils import yamllib, RDFLibNodeProvider, RDFLibRelationProvider
from lkbutils.relationprovider import Cyclic
def leaves_from_struct(data):
    """
    Traverse/generate structured data.

    data: list or dict is acceptable / any dict
        structure will be ignored, thus every list
        in deepest levels is enumerated as term list.

    Yields every leaf as text.  Raises TypeError for any other type.

    Bug fix: the original used the Python-2-only ``unicode`` builtin and
    therefore raised NameError under Python 3.  The behavior is preserved
    on Python 2 (plain ``str`` leaves are promoted to ``unicode`` via the
    same implicit ASCII decode that ``unicode(data)`` performed).
    """
    text_type = u''.__class__  # unicode on Py2, str on Py3
    if isinstance(data, text_type):
        yield data
    elif isinstance(data, bytes):
        # Py2 plain str / Py3 bytes: promote to text.
        yield data.decode('ascii')
    elif isinstance(data, list):
        for child in data:
            for leaf in leaves_from_struct(child):
                yield leaf
    elif isinstance(data, dict):
        for child in data.values():
            for leaf in leaves_from_struct(child):
                yield leaf
    else:
        raise TypeError(
            u'invalid data type: {}({})'.format(
                type(data), data
            )
        )
class TermLoader(object):
    """Loads term definitions."""

    def __init__(self, **options):
        """Create the loader and its underlying node provider."""
        self._nodeprovider = self.get_node_provider(**options)

    @property
    def ns(self):
        """Proxy to the node provider's namespace."""
        return self._nodeprovider.ns

    @property
    def graph(self):
        """Proxy to the node provider's graph."""
        return self._nodeprovider.graph

    @property
    def nodeprovider(self):
        """The underlying node provider."""
        return self._nodeprovider

    def get_node_provider(self, **options):
        """Delegate NodeProvider creation to subclasses."""
        return self.nodeprovider_class(**options)

    def load(self, data, as_property=False):
        """
        Load terms from data.

        data: list or dict is acceptable / any dict
            structure will be ignored, thus every list
            in deepest levels is loaded as term list.
        as_property: load terms as properties.
        """
        for leaf in leaves_from_struct(data):
            self._addterm(leaf, as_property)

    def load_yaml(self, yaml_data):
        """
        Load terms from YAML representation.

        SampleFormat:
            +++++++++++++++++++++
            # YAML
            options:
                as_property: yes
            terms:
                subcategory1:
                    - term1
                    - term2
                subcategory2:
                    - term3
                ...
            +++++++++++++++++++++

        See RDFLibTermLoader.load for options.
        """
        parsed = yamllib.parse_yaml(yaml_data)
        self.load(parsed.get(u'terms', []), **parsed.get(u'options', {}))

    def _addterm(self, name, as_property):
        # Register a single term with the node provider.
        self._nodeprovider.add(name, as_property=as_property)
class RDFLibTermLoader(TermLoader):
    """TermLoader subclass using RDFLibNodeProvider."""
    # NodeProvider implementation instantiated by
    # TermLoader.get_node_provider().
    nodeprovider_class = RDFLibNodeProvider
class YamlTermConfigLoader(object):
    """Utility for loading YAML term configurations."""

    @classmethod
    def load_yaml(klass, yaml_data):
        """
        Load a YAML to get configs. for TermLoaders.

        SampleFormat:
            +++++++++++++++++++++
            # YAML
            options:
                romanize: yes
            load_options:
                as_property: no
            terms:
                subcategory1:
                    - term1
                    - term2
                subcategory2:
                    - term3
                ...
            +++++++++++++++++++++
        """
        parsed = yamllib.parse_yaml(yaml_data)
        # Loader-construction options come from 'options'; 'load_options'
        # are forwarded to the load() call itself.
        loader = klass._create_termloader(**parsed.get(u'options', {}))
        loader.load(
            parsed.get(u'terms', []),
            **parsed.get(u'load_options', {})
        )
        return loader

    @classmethod
    def _create_termloader(klass, **options):
        # Instantiate whichever TermLoader subclass is configured.
        return klass.loader_class(**options)
class RDFLibYamlTermConfigLoader(YamlTermConfigLoader):
    """YamlTermConfigLoader using RDFLibTermLoader."""
    # TermLoader implementation instantiated by _create_termloader().
    loader_class = RDFLibTermLoader
# Convenience alias: parse a YAML term config with the RDFLib-backed loader.
rdflib_load_terms = RDFLibYamlTermConfigLoader.load_yaml
class RelationLoader(object):
    """Loads relation definitions.

    Identifiers are resolved to nodes through the given node provider's
    ``ns`` namespace, and each (src, dest) pair is registered with a
    relation provider created via ``relationprovider_class`` (set by
    subclasses).
    """

    def __init__(self, nodeprovider=None, relation=None, **graph_options):
        """
        nodeprovider: provider exposing nodes as attributes of its ``ns``.
        relation: attribute name (on ``nodeprovider.ns``) of the relation node.
        graph_options: forwarded to the relation provider constructor.
        """
        relation_node = getattr(nodeprovider.ns, relation)
        self._relation_provider = self.create_relation_provider(
            relation=relation_node,
            **graph_options
        )
        self._nodeprovider = nodeprovider

    @property
    def graph(self):
        """Proxy to self._relation_provider.graph."""
        return self._relation_provider.graph

    @property
    def relationprovider(self):
        """Proxy to self._relation_provider."""
        return self._relation_provider

    def create_relation_provider(self, **options):
        """Delegate RelationProvider creation to subclasses."""
        return self.relationprovider_class(**options)

    def load(self, pairs):
        """
        Load terms from list of pairs.

        pairs: list of pairs: (src, dest) whose elements are
            names provided by the given node provider.
        """
        for src, dest in pairs:
            self._register_relation(src, dest)

    def _register_relation(self, src, dest):
        # Resolve identifiers to nodes, then register the edge; cycle
        # errors are re-raised with human-readable names.
        src_node, dest_node = self._get_node(src), self._get_node(dest)
        try:
            self._relation_provider.add(src_node, dest_node)
        except Cyclic as cyclic_err:
            self._handle_cyclic_error(cyclic_err)

    def _get_node(self, identifier):
        # Nodes are exposed as attributes of the provider's namespace.
        return getattr(self._nodeprovider.ns, identifier)

    def _handle_cyclic_error(self, cyclic_err):
        # Re-raise the cycle error with every node mapped back to its
        # original name, which makes the message intelligible.
        mod_relation = self._get_original_name(cyclic_err.relation)
        mod_path = [
            self._get_original_name(node)
            for node in cyclic_err.path
        ]
        raise Cyclic(mod_path, relation=mod_relation)

    def _get_original_name(self, node):
        nodeprovider = self._nodeprovider
        # Bug fix: the guard used to probe hasattr(..., 'get_identifier_from')
        # while the call below invokes get_origin_name_from(), so providers
        # defining only get_origin_name_from() were silently ignored.
        # Probe the method that is actually called.
        if not hasattr(nodeprovider, 'get_origin_name_from'):
            return node
        return nodeprovider.get_origin_name_from(node)
class RDFLibRelationLoader(RelationLoader):
    """RelationLoader subclass using RDFLibRelationProvider."""
    # RelationProvider implementation instantiated by
    # RelationLoader.create_relation_provider().
    relationprovider_class = RDFLibRelationProvider
class YamlRelationConfigLoader(object):
    """Utility for loading YAML relation configurations."""

    @classmethod
    def load_yaml(klass, yaml_data):
        """
        Load a YAML to get configs. for RelationLoaders.

        SampleFormat:
            +++++++++++++++++++++
            # YAML
            options:  # yaml-global options
                dry: yes
                nointerlinks: yes
                acyclic: yes
            relations:
                relation1:
                    options:  # override for specific relation
                        dry: no
                    pairs:
                        subcategory1:
                            - term1 term2
                            - term2 term3
                        subcategory2:
                            - term3 term4
                        ...
                relation2:
                    ...
            +++++++++++++++++++++
        """
        parsed = yamllib.parse_yaml(yaml_data)
        shared_options = parsed.get(u'options', {})
        relations = parsed.get(u'relations', {})
        # Each relation gets its own copy of the shared options so
        # per-relation overrides cannot leak into each other.
        return {
            name: klass._create_config(name, relations[name],
                                       shared_options.copy())
            for name in relations
        }

    @classmethod
    def relation_providers_from(klass, yaml_data, nodeprovider=None):
        """Parse config YAML, create {relation => RelationLoader} map."""
        configs = klass.load_yaml(yaml_data)
        loaders = {}
        for name in configs:
            loaders[name] = klass._create_loader(configs[name], nodeprovider)
        return loaders

    @classmethod
    def _create_config(klass, relation, data, base_options):
        # Merge per-relation option overrides on top of the shared options
        # and parse the pair strings into tuples.
        base_options.update(data.get(u'options', {}))
        parsed_pairs = [
            klass._parse_pair(leaf)
            for leaf in leaves_from_struct(data.get(u'pairs', []))
        ]
        return {
            u'relation': relation,
            u'options': base_options,
            u'pairs': parsed_pairs,
        }

    @classmethod
    def _parse_pair(klass, pair_repr):
        # 'src dest' -> ('src', 'dest')
        return tuple(pair_repr.split(u' '))

    @classmethod
    def _create_loader(klass, loader_config, nodeprovider):
        # Build a RelationLoader from one parsed config and load its pairs.
        loader = klass.loader_class(
            nodeprovider=nodeprovider,
            relation=loader_config.get(u'relation'),
            **loader_config.get(u'options', {})
        )
        loader.load(loader_config.get(u'pairs', []))
        return loader
class RDFLibYamlRelationConfigLoader(YamlRelationConfigLoader):
    """YamlRelationConfigLoader using RDFLibRelationLoader."""
    # RelationLoader implementation instantiated by _create_loader().
    loader_class = RDFLibRelationLoader
# Convenience aliases for the RDFLib-backed loaders.
rdflib_load_relcfg = RDFLibYamlRelationConfigLoader.load_yaml
rdflib_load_relations = RDFLibYamlRelationConfigLoader.relation_providers_from
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Interfaces to the Sun/Oracle/Open Grid Engine batch queueing systems.
"""
# Copyright (C) 2011, 2012 ETH Zurich and University of Zurich. All rights reserved.
#
# Authors:
# Christian Panse <cp@fgcz.ethz.ch>
# Riccardo Murri <riccardo.murri@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
__docformat__ = 'reStructuredText'
__version__ = '$Revision$'
# stdlib imports
from collections import Mapping, defaultdict
import os
import subprocess
import sys
import time
import UserDict
import xml.sax
# local imports
from vmmad import log
from vmmad.batchsys import BatchSystem
from vmmad.orchestrator import JobInfo
from vmmad.util import Struct
class _QstatXmlHandler(xml.sax.ContentHandler):
    """
    SAX `ContentHandler` implementation for parsing the output of
    `qstat -u ... -xml`.

    Each `<job_list>` element is turned into a `JobInfo` object and
    appended to the destination list passed to the constructor.
    """

    # XML attributes *not* in this list are ignored
    JOB_ATTRIBUTES = [
        'JB_job_number',
        'JB_submission_time',
        'JAT_start_time',
        'JAT_prio',
        'JB_name',
        'state',
        'queue_name',
        'slots'
    ]

    # conversion of XML fields to Python data
    # NOTE(review): JB_job_number/JB_submission_time/JAT_start_time are
    # handled by dedicated elif branches in endElement() below, so their
    # CONVERT entries are never actually applied (jobid stays a string) --
    # confirm whether that is intended.
    CONVERT = defaultdict(
        # by default, return `str`
        lambda: str,
        # return other values in particular cases:
        JB_job_number=int,
        JB_submission_time=str,
        JAT_prio=float,
        slots=int,
    )

    # rename fields to adhere to what the `JobInfo` ctor expects
    @staticmethod
    def rename(field):
        try:
            # map field names according to this...
            return {
                'JB_job_number':'jobid',
                'JB_name':'name',
            }[field]
        except KeyError:
            # ...and by default, keep field name unchanged
            return field

    def __init__(self, dest):
        # `dest`: list that completed job records are appended to.
        self.jobs = dest
        # Character data of the element currently being read, accumulated
        # chunk by chunk (SAX may deliver text in several calls).
        self.value = [ ]
        # Current XML nesting depth; 0 again means end of document.
        self._level = 0

    def startElement(self,name,attrs):
        self._level += 1
        if name == 'job_list':
            # A new job description starts; its 'state' attribute carries
            # the queue section ("running"/"pending").
            assert 'state' in attrs
            self.current = JobInfo(jobid='invalid', state=JobInfo.OTHER)
            self.current_job_state = attrs['state']
        ## for other elements, just reset `value` so we can
        ## accumulate characters in `self.characters`
        else:
            self.value = []

    def characters(self, chrs):
        # May be invoked several times per element; collect the chunks.
        self.value.append(chrs)

    def endElement(self,name):
        self._level -= 1
        #self.current.submit_time = "1970-01-01T00:00:00"
        if 0 == self._level:
            # end of XML
            return
        if 'job_list' == name:
            # end of job description, commit
            self.jobs.append(self.current)
            return
        # process job-level elements
        value_str = str(str.join('', self.value))
        if 'queue_name' == name:
            if '' == value_str:
                # Pending jobs have an empty queue name.
                self.current['queue_name'] = None
                self.current['exec_node_name'] = None
            else:
                self.current['queue_name'] = value_str
                # FIXME: GE's queue names have the form queue@hostname;
                # raise an appropriate exception if this is not the case!
                at = value_str.index('@') + 1
                self.current['exec_node_name'] = value_str[at:]
        elif 'state' == name:
            # the GE state letters are explained in the `qstat` man page
            if (('E' in value_str)
                or ('h' in value_str)
                or ('T' in value_str)
                or ('s' in value_str) or ('S' in value_str)
                or ('d' in value_str)):
                self.current.state = JobInfo.OTHER
            elif 'q' in value_str:
                self.current.state = JobInfo.PENDING
            elif ('r' in value_str) or ('t' in value_str):
                self.current.state = JobInfo.RUNNING
        elif 'JB_job_number' == name:
            # NOTE(review): stored as the raw string, despite the int
            # entry in CONVERT (this branch shadows the generic one).
            self.current.jobid = value_str
        elif 'JB_submission_time' == name:
            # Timestamps arrive in ISO-like form, e.g. 2012-01-01T12:00:00.
            self.current.submitted_at = time.mktime(time.strptime(value_str, '%Y-%m-%dT%H:%M:%S'))
        elif 'JAT_start_time' == name:
            self.current.running_at = time.mktime(time.strptime(value_str, '%Y-%m-%dT%H:%M:%S'))
        elif name in self.JOB_ATTRIBUTES:
            # convert each XML attribute to a Python representation
            # (defaulting to `str`, see CONVERT above)
            self.current[self.rename(name)] = self.CONVERT[name](value_str)
        return
class GridEngine(BatchSystem):
    """
    Interface to a Sun/Oracle/Open Grid Engine batch-queueing system,
    queried by running its ``qstat`` command-line client.

    (The original docstring -- "abstract base class describing the
    interface that a node provider should implement" -- was a copy-paste
    mistake; this is a concrete SGE front-end.)
    """

    def __init__(self, user='*'):
        """
        Set up parameters for querying SGE.

        :param user: only list jobs belonging to this user
            (the default ``'*'`` matches all users).
        """
        self.user = user

    def run_qstat(self):
        """Run ``qstat -u <user> -xml`` and return its standard output."""
        qstat_cmd = ['qstat', '-u', self.user, '-xml']
        try:
            qstat_process = subprocess.Popen(
                qstat_cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False)
            stdout, stderr = qstat_process.communicate()
            return stdout
        except subprocess.CalledProcessError as ex:
            # Bug fixes: this module imports `log` (from vmmad), not
            # `logging`, so the old handler raised NameError when it
            # triggered; also replaced the Python-2-only `except E, ex`
            # syntax with `except E as ex`.
            # NOTE(review): Popen/communicate never raise
            # CalledProcessError; this handler looks like dead code left
            # over from a `check_call`-based version -- confirm before
            # removing.
            log.error("Error running '%s': '%s'; exit code %d",
                      str.join(' ', qstat_cmd), stderr, ex.returncode)
            raise

    @staticmethod
    def parse_qstat_xml_output(qstat_xml_out):
        """
        Parse the output of a `qstat -xml` command and return a list of
        `JobInfo` objects whose keys/attributes directly map the XML
        contents.
        """
        jobs = [ ]
        # (dropped a stray `xml.sax.make_parser()` call whose result was
        # discarded; `parseString` builds its own parser internally.)
        xml.sax.parseString(qstat_xml_out, _QstatXmlHandler(jobs))
        return jobs

    def get_sched_info(self):
        """
        Query SGE through ``qstat -xml`` and return a list of
        `JobInfo` objects representing the jobs in the batch queue
        system.
        """
        qstat_xml_out = self.run_qstat()
        return self.parse_qstat_xml_output(qstat_xml_out)
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import argparse
import datetime
import glob
import os
import re
import sys
# Bail out early on interpreters this script cannot run on.
if sys.version_info < (2, 7, 0):
  # Fixed grammar of the user-facing message ("required run" -> "required
  # to run").
  sys.stderr.write("python 2.7 or later is required to run this script\n")
  sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import NACLPORTS_DIR, GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
# Frequently used locations inside the NaCl checkout.
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
# Staging area for downloaded toolchain tarballs.
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
# Helper scripts invoked by the build steps below.
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
# Pinned naclports checkout used for the ports build.
NACLPORTS_URL = 'https://chromium.googlesource.com/external/naclports.git'
NACLPORTS_REV = '65c71c1524a74ff8415573e5e5ef7c59ce4ac437'
# Prefix of the gyp/ninja output directories (suffixed with '-<arch>',
# see GetNinjaOutDir).
GYPBUILD_DIR = 'gypbuild'
# Parsed command-line options; None until populated elsewhere
# (presumably by the argument-parsing entry point -- not visible here).
options = None
# Map of: ToolchainName: (PackageName, SDKDir).
TOOLCHAIN_PACKAGE_MAP = {
  'newlib': ('nacl_x86_newlib', '%(platform)s_x86_newlib'),
  'bionic': ('nacl_arm_bionic', '%(platform)s_arm_bionic'),
  'arm': ('nacl_arm_newlib', '%(platform)s_arm_newlib'),
  'glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc'),
  'pnacl': ('pnacl_newlib', '%(platform)s_pnacl')
  }
def GetToolchainNaClInclude(tcname, tcpath, arch):
  """Return the NaCl include directory under `tcpath` for `arch`.

  `tcname` is accepted for signature compatibility but unused.
  Exits via buildbot_common.ErrorExit on an unknown architecture.
  """
  include_subdirs = {
      'x86': 'x86_64-nacl',
      'pnacl': 'le32-nacl',
      'arm': 'arm-nacl',
  }
  if arch not in include_subdirs:
    buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
    return None
  return os.path.join(tcpath, include_subdirs[arch], 'include')
def GetConfigDir(arch):
  """Return the build configuration directory name for `arch`."""
  # Windows x64 builds land in a dedicated output directory; the
  # endswith() check runs first so non-x64 arches never query the platform.
  is_win_x64 = arch.endswith('x64') and getos.GetPlatform() == 'win'
  return 'Release_x64' if is_win_x64 else 'Release'
def GetNinjaOutDir(arch):
  """Return the ninja output directory for `arch`."""
  build_dir = '%s-%s' % (GYPBUILD_DIR, arch)
  return os.path.join(OUT_DIR, build_dir, GetConfigDir(arch))
def GetGypBuiltLib(tcname, arch):
  """Return the directory of gyp-built libraries for `tcname`/`arch`.

  For pnacl, arch=None selects the bitcode (pnacl_newlib) libraries,
  while a concrete arch selects the clang-<arch> newlib build.

  Bug fix: removed a stray Python-2 debug statement (`print arch`) that
  was left in the pnacl branch -- it spammed the build log and is a
  syntax error under Python 3.
  """
  if arch == 'ia32':
    lib_suffix = '32'
  elif arch == 'x64':
    lib_suffix = '64'
  elif arch == 'arm':
    lib_suffix = 'arm'
  else:
    lib_suffix = ''

  if tcname == 'pnacl':
    if arch is None:
      arch = 'x64'
      tcname = 'pnacl_newlib'
    else:
      arch = 'clang-' + arch
      tcname = 'newlib'

  return os.path.join(GetNinjaOutDir(arch),
                      'gen',
                      'tc_' + tcname,
                      'lib' + lib_suffix)
def GetToolchainNaClLib(tcname, tcpath, arch):
  """Return the NaCl library directory under `tcpath` for `tcname`/`arch`.

  Returns None when neither the arch nor the toolchain is recognized
  (matching the original fall-through behavior).
  """
  if arch == 'ia32':
    subdirs = ('x86_64-nacl', 'lib32')
  elif arch == 'x64':
    subdirs = ('x86_64-nacl', 'lib')
  elif arch == 'arm':
    subdirs = ('arm-nacl', 'lib')
  elif tcname == 'pnacl':
    subdirs = ('le32-nacl', 'lib')
  else:
    return None
  return os.path.join(tcpath, *subdirs)
def GetToolchainDirName(tcname, arch):
  """Return the directory name of the given toolchain, e.g. linux_x86_newlib."""
  platform = getos.GetPlatform()
  if tcname == 'pnacl':
    return '%s_%s' % (platform, tcname)
  if arch == 'arm':
    return '%s_arm_%s' % (platform, tcname)
  return '%s_x86_%s' % (platform, tcname)
def GetGypToolchainLib(tcname, arch):
  """Return the gyp-built toolchain library dir for `tcname`/`arch`."""
  # ARM builds live under the 'arm' package regardless of toolchain name.
  toolchain = arch if arch == 'arm' else tcname
  tcpath = os.path.join(GetNinjaOutDir(arch), 'gen', 'sdk',
                        '%s_x86' % getos.GetPlatform(),
                        TOOLCHAIN_PACKAGE_MAP[toolchain][0])
  return GetToolchainNaClLib(tcname, tcpath, arch)
def GetOutputToolchainLib(pepperdir, tcname, arch):
  """Return the destination library dir inside the SDK for `tcname`/`arch`."""
  tcpath = os.path.join(pepperdir, 'toolchain',
                        GetToolchainDirName(tcname, arch))
  return GetToolchainNaClLib(tcname, tcpath, arch)
def GetPNaClTranslatorLib(tcpath, arch):
  """Return the PNaCl translator library dir under `tcpath` for `arch`.

  Exits via buildbot_common.ErrorExit on an unsupported architecture.
  """
  if arch not in ('arm', 'x86-32', 'x86-64'):
    buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
  return os.path.join(tcpath, 'translator', arch, 'lib')
def BuildStepDownloadToolchains(toolchains):
  """Sync and extract the NaCl core SDK toolchain packages."""
  buildbot_common.BuildStep('Running package_version.py')
  args = [sys.executable, PKGVER, '--mode', 'nacl_core_sdk']
  if 'bionic' in toolchains:
    bionic_pkg = os.path.join('%s_x86' % getos.GetPlatform(),
                              'nacl_arm_bionic')
    args += ['--append', bionic_pkg]
  if getos.GetPlatform() == 'linux':
    # TODO(sbc): remove this once this change makes it into chrome
    # https://codereview.chromium.org/1080513003/
    args += ['--append', 'arm_trusted']
  args += ['sync', '--extract']
  buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
  """Remove stale pepper output directories and recreate `pepperdir`."""
  buildbot_common.BuildStep('Clean Pepper Dirs')
  for dirname in (pepperdir,
                  pepperdir_old,
                  os.path.join(OUT_DIR, 'arm_trusted')):
    if os.path.exists(dirname):
      buildbot_common.RemoveDir(dirname)
  buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
  """Create each of `subdirs` under `pepperdir`."""
  for name in subdirs:
    buildbot_common.MakeDir(os.path.join(pepperdir, name))
# Top-level text files copied verbatim into the SDK root
# (consumed by BuildStepCopyTextFiles below).
TEXT_FILES = [
  'AUTHORS',
  'COPYING',
  'LICENSE',
  'README.Makefiles',
  'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
                           nacl_revision):
  """Copy the top-level text files into the SDK and expand README tokens.

  pepperdir: SDK output root.
  pepper_ver / chrome_revision / nacl_revision: strings substituted for
      the ${VERSION} / ${CHROME_REVISION} / ${NACL_REVISION} placeholders.

  Bug fix: both README file objects were opened without ever being
  closed; they are now managed with `with` blocks.
  """
  buildbot_common.BuildStep('Add Text Files')
  InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)

  # Replace a few placeholders in README
  with open(os.path.join(SDK_SRC_DIR, 'README')) as f:
    readme_text = f.read()
  readme_text = readme_text.replace('${VERSION}', pepper_ver)
  readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
  readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
                                    build_version.ChromeCommitPosition())
  readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)

  # Year/Month/Day Hour:Minute:Second
  time_format = '%Y/%m/%d %H:%M:%S'
  readme_text = readme_text.replace('${DATE}',
      datetime.datetime.now().strftime(time_format))

  with open(os.path.join(pepperdir, 'README'), 'w') as f:
    f.write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
  """Extract the requested toolchain packages into pepperdir/toolchain."""
  buildbot_common.BuildStep('Untar Toolchains')
  platform = getos.GetPlatform()
  build_platform = '%s_x86' % platform
  tmpdir = os.path.join(OUT_DIR, 'tc_temp')
  buildbot_common.RemoveDir(tmpdir)
  buildbot_common.MakeDir(tmpdir)

  # Pairs of ("$PACKAGE_TARGET/$PACKAGE", destination directory relative
  # to pepperdir/toolchain) to extract.
  extract_packages = []
  for toolchain in toolchains:
    if toolchain in TOOLCHAIN_PACKAGE_MAP:
      package_name, tcname = TOOLCHAIN_PACKAGE_MAP[toolchain]
      extract_packages.append(
          (os.path.join(build_platform, package_name),
           tcname % {'platform': platform}))

  # On linux we also want to extract the arm_trusted package which contains
  # the ARM libraries we ship in support of sel_ldr_arm.
  if platform == 'linux':
    extract_packages.append(
        (os.path.join(build_platform, 'arm_trusted'), 'arm_trusted'))

  if not extract_packages:
    return

  # Extract all of the packages into the temp directory.
  package_names = [src for src, _ in extract_packages]
  buildbot_common.Run([sys.executable, PKGVER,
                       '--packages', ','.join(package_names),
                       '--tar-dir', NACL_TOOLCHAINTARS_DIR,
                       '--dest-dir', tmpdir,
                       'extract'])

  # Move all the packages we extracted to the correct destination.
  for package_name, dest_dir in extract_packages:
    buildbot_common.Move(os.path.join(tmpdir, package_name),
                         os.path.join(pepperdir, 'toolchain', dest_dir))

  # Cleanup the temporary directory we are no longer using.
  buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
# An empty destination means "header directory root"; a destination
# ending in '/' keeps the source basename (see InstallFiles).
NACL_HEADER_MAP = {
  'newlib': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/pthread/pthread.h', ''),
      ('native_client/src/untrusted/pthread/semaphore.h', ''),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
  'glibc': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
  'bionic': [
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
  'host': []
}
def InstallFiles(src_root, dest_root, file_list):
  """Copy a set of files from src_root to dest_root according
  to the given mapping. This allows files to be copied from
  to a location in the destination tree that is different to the
  location in the source tree.

  If the destination mapping ends with a '/' then the destination
  basename is inherited from the source file.

  Wildcards can be used in the source list but it is not recommended
  as this can end up adding things to the SDK unintentionally.
  """
  for file_spec in file_list:
    # The list of files to install can be a simple list of
    # strings or a list of pairs, where each pair corresponds
    # to a mapping from source to destination names.
    # (idiom fix: isinstance() instead of the exact-type check
    # `type(file_spec) == str`, which rejected str subclasses)
    if isinstance(file_spec, str):
      src_file = dest_file = file_spec
    else:
      src_file, dest_file = file_spec

    src_file = os.path.join(src_root, src_file)

    # Expand sources files using glob.
    sources = glob.glob(src_file)
    if not sources:
      sources = [src_file]

    if len(sources) > 1 and not dest_file.endswith('/'):
      buildbot_common.ErrorExit("Target file must end in '/' when "
                                "using globbing to install multiple files")

    for source in sources:
      if dest_file.endswith('/'):
        dest = os.path.join(dest_file, os.path.basename(source))
      else:
        dest = dest_file
      dest = os.path.join(dest_root, dest)
      if not os.path.isdir(os.path.dirname(dest)):
        buildbot_common.MakeDir(os.path.dirname(dest))
      buildbot_common.CopyFile(source, dest)
def InstallNaClHeaders(tc_dst_inc, tc_name):
  """Copies NaCl headers to expected locations in the toolchain."""
  # arm and pnacl reuse the newlib header set.
  if tc_name in ('arm', 'pnacl'):
    tc_name = 'newlib'
  InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[tc_name])
def MakeNinjaRelPath(path):
  """Return `path` joined onto OUT_DIR expressed relative to SRC_DIR."""
  rel_out = os.path.relpath(OUT_DIR, SRC_DIR)
  return os.path.join(rel_out, path)
# TODO(ncbray): stop building and copying libraries into the SDK that are
# already provided by the toolchain.
# Per-toolchain list of library files installed into each toolchain's lib
# directory by GypNinjaInstall.
TOOLCHAIN_LIBS = {
  'bionic' : [
    'libminidump_generator.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libppapi.a',
  ],
  'newlib' : [
    'libminidump_generator.a',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libnosys.a',
    'libppapi.a',
    'libppapi_stub.a',
    'libpthread.a',
  ],
  # glibc additionally ships shared (.so) variants.
  'glibc': [
    'libminidump_generator.a',
    'libminidump_generator.so',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_dyncode.so',
    'libnacl_exception.a',
    'libnacl_exception.so',
    'libnacl_list_mappings.a',
    'libnacl_list_mappings.so',
    'libppapi.a',
    'libppapi.so',
    'libppapi_stub.a',
  ],
  'pnacl': [
    'libminidump_generator.a',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libnosys.a',
    'libppapi.a',
    'libppapi_stub.a',
    'libpthread.a',
  ]
}
def GypNinjaInstall(pepperdir, toolchains):
  """Install gyp/ninja-built binaries and libraries into the SDK layout.

  pepperdir: SDK output root.
  toolchains: list of toolchain names being packaged.

  Copies host tools (sel_ldr, irt_core, debugging tools, ...) into
  pepperdir/tools, then the per-toolchain libraries into each
  toolchain's lib directory.

  Bug fix: the arm_files list contained the irt_core_newlib_arm.nexe
  entry twice; the duplicate (which just re-copied the same file) has
  been removed.
  """
  tools_files_32 = [
    ['sel_ldr', 'sel_ldr_x86_32'],
    ['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
    ['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
  ]
  tools_files_64 = []

  platform = getos.GetPlatform()

  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if platform != 'win':
    tools_files_64 += [
      ['dump_syms', 'dump_syms'],
      ['minidump_dump', 'minidump_dump'],
      ['minidump_stackwalk', 'minidump_stackwalk']
    ]

  tools_files_64.append(['sel_ldr', 'sel_ldr_x86_64'])
  tools_files_64.append(['ncval_new', 'ncval'])

  if platform == 'linux':
    tools_files_32.append(['nacl_helper_bootstrap',
                           'nacl_helper_bootstrap_x86_32'])
    tools_files_64.append(['nacl_helper_bootstrap',
                           'nacl_helper_bootstrap_x86_64'])
    tools_files_32.append(['nonsfi_loader_newlib_x32_nonsfi.nexe',
                           'nonsfi_loader_x86_32'])

  tools_dir = os.path.join(pepperdir, 'tools')
  buildbot_common.MakeDir(tools_dir)

  # Add .exe extensions to all windows tools
  for pair in tools_files_32 + tools_files_64:
    if platform == 'win' and not pair[0].endswith('.nexe'):
      pair[0] += '.exe'
      pair[1] += '.exe'

  InstallFiles(GetNinjaOutDir('x64'), tools_dir, tools_files_64)
  InstallFiles(GetNinjaOutDir('ia32'), tools_dir, tools_files_32)

  # Add ARM binaries
  if platform == 'linux' and not options.no_arm_trusted:
    arm_files = [
      ['irt_core_newlib_arm.nexe', 'irt_core_arm.nexe'],
      ['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm'],
      ['nonsfi_loader_newlib_arm_nonsfi.nexe', 'nonsfi_loader_arm'],
      ['sel_ldr', 'sel_ldr_arm']
    ]
    InstallFiles(GetNinjaOutDir('arm'), tools_dir, arm_files)

  for tc in set(toolchains) & set(['newlib', 'glibc', 'pnacl']):
    if tc == 'pnacl':
      xarches = (None, 'ia32', 'x64', 'arm')
    elif tc == 'glibc':
      xarches = ('ia32', 'x64')
    else:
      xarches = ('arm', 'ia32', 'x64')

    for xarch in xarches:
      src_dir = GetGypBuiltLib(tc, xarch)
      dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
      InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[tc])

      # Copy ARM newlib components to bionic
      if tc == 'newlib' and xarch == 'arm' and 'bionic' in toolchains:
        bionic_dir = GetOutputToolchainLib(pepperdir, 'bionic', xarch)
        InstallFiles(src_dir, bionic_dir, TOOLCHAIN_LIBS['bionic'])
def GypNinjaBuild_NaCl(rel_out_dir):
  """Build the NaCl core SDK (plus ncval) for every target arch."""
  # TODO(binji): gyp_nacl doesn't build properly on Windows anymore; it only
  # can use VS2010, not VS2013 which is now required by the Chromium repo. NaCl
  # needs to be updated to perform the same logic as Chromium in detecting VS,
  # which can now exist in the depot_tools directory.
  # See https://code.google.com/p/nativeclient/issues/detail?id=4022
  #
  # For now, let's use gyp_chromium to build these components.
  # gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
  gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
  all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')

  # Plain builds first, then the nacl-clang variants; same order as the
  # original explicit call list.
  for arch in ('ia32', 'x64', 'arm'):
    GypNinjaBuild(arch, gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
                  MakeNinjaRelPath('%s-%s' % (rel_out_dir, arch)))
  for arch in ('ia32', 'x64', 'arm'):
    # A fresh gyp_defines list per call: GypNinjaBuild appends to the
    # list it receives.
    GypNinjaBuild(arch, gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
                  MakeNinjaRelPath('%s-clang-%s' % (rel_out_dir, arch)),
                  gyp_defines=['use_nacl_clang=1'])
  GypNinjaBuild('x64', gyp_py, all_gyp, 'ncval_new',
                MakeNinjaRelPath(rel_out_dir + '-x64'))
def GypNinjaBuild_Breakpad(rel_out_dir):
  """Build the breakpad host tools into `rel_out_dir` (no-op on Windows)."""
  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if getos.GetPlatform() == 'win':
    return
  targets = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
  GypNinjaBuild('x64',
                os.path.join(SRC_DIR, 'build', 'gyp_chromium'),
                os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp'),
                targets,
                MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild_PPAPI(arch, rel_out_dir, gyp_defines=None):
  """Build the untrusted ppapi_lib target for a single architecture."""
  gyp_script = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  native_client_gyp = os.path.join(SRC_DIR, 'ppapi', 'native_client',
                                   'native_client.gyp')
  GypNinjaBuild(arch, gyp_script, native_client_gyp, 'ppapi_lib',
                MakeNinjaRelPath(rel_out_dir), gyp_defines=gyp_defines)
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
  """Build the ahead-of-time pnacl_irt_shim for |target_arch|."""
  # TODO(binji): This will build the pnacl_irt_shim twice; once as part of the
  # Chromium build, and once here. When we move more of the SDK build process
  # to gyp, we can remove this.
  gyp_script = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  shim_gyp = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
                          'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
  GypNinjaBuild(target_arch, gyp_script, shim_gyp, ['aot'],
                MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
                  out_dir, force_arm_gcc=True, gyp_defines=None):
  """Run gyp to generate ninja files for |gyp_file|, then build |targets|.

  Args:
    arch: value for the target_arch gyp define, or None to use gyp's default.
    gyp_py_script: path to the gyp driver script (gyp_chromium).
    gyp_file: the .gyp file to generate build files from.
    targets: a target name or list of target names to build.
    out_dir: ninja output directory (see MakeNinjaRelPath).
    force_arm_gcc: unused here; kept for interface compatibility.
    gyp_defines: optional extra GYP_DEFINES entries.
  """
  gyp_env = dict(os.environ)
  gyp_env['GYP_GENERATORS'] = 'ninja'
  # Copy before appending so we never mutate a list the caller passed in
  # (callers share literal lists such as ['use_nacl_clang=1']).
  gyp_defines = list(gyp_defines or [])
  gyp_defines.append('nacl_allow_thin_archives=0')
  if not options.no_use_sysroot:
    gyp_defines.append('use_sysroot=1')
  if options.mac_sdk:
    gyp_defines.append('mac_sdk=%s' % options.mac_sdk)
  if arch is not None:
    gyp_defines.append('target_arch=%s' % arch)
    if arch == 'arm':
      gyp_env['GYP_CROSSCOMPILE'] = '1'
      # The arm glibc toolchain is currently having issues on windows.
      # TODO(sbc): remove this once we fix the issue
      # https://code.google.com/p/nativeclient/issues/detail?id=4225
      gyp_defines.append('disable_glibc=1')
      if options.no_arm_trusted:
        gyp_defines.append('disable_cross_trusted=1')
  if getos.GetPlatform() == 'mac':
    gyp_defines.append('clang=1')
  gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
  # We can't use windows path separators in GYP_GENERATOR_FLAGS since
  # gyp uses shlex to parse them and treats '\' as an escape char.
  gyp_env['GYP_GENERATOR_FLAGS'] = 'output_dir=%s' % out_dir.replace('\\', '/')
  # Print relevant environment variables.  (items() rather than the
  # py2-only iteritems(); print(...) works in both python 2 and 3.)
  for key, value in gyp_env.items():
    if key.startswith('GYP') or key in ('CC',):
      print(' %s="%s"' % (key, value))
  buildbot_common.Run(
      [sys.executable, gyp_py_script, gyp_file, '--depth=.'],
      cwd=SRC_DIR,
      env=gyp_env)
  NinjaBuild(targets, out_dir, arch)
def NinjaBuild(targets, out_dir, arch):
  """Invoke ninja in the per-config directory for |arch| to build |targets|.

  |targets| may be a single target name or a list of names.
  """
  # isinstance() is the idiomatic type test (also accepts list subclasses).
  if not isinstance(targets, list):
    targets = [targets]
  out_config_dir = os.path.join(out_dir, GetConfigDir(arch))
  buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains, build, clean):
  """Build the gyp/ninja SDK pieces and install NaCl headers per toolchain.

  Args:
    pepperdir: root of the pepper_NN output directory being assembled.
    toolchains: list of toolchain names to process ('newlib', 'glibc', ...).
    build: when True, run the gyp/ninja builds; otherwise only install.
    clean: when True, wipe previous GYPBUILD_DIR* output (forces a build).
  """
  buildbot_common.BuildStep('SDK Items')
  if clean:
    for dirname in glob.glob(os.path.join(OUT_DIR, GYPBUILD_DIR + '*')):
      buildbot_common.RemoveDir(dirname)
    # A clean output directory implies everything must be rebuilt.
    build = True
  if build:
    GypNinjaBuild_NaCl(GYPBUILD_DIR)
    GypNinjaBuild_Breakpad(GYPBUILD_DIR + '-x64')
    if set(toolchains) & set(['glibc', 'newlib']):
      GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-ia32')
      GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-x64')
    if 'arm' in toolchains:
      GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm')
    if 'pnacl' in toolchains:
      GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-clang-ia32',
                          ['use_nacl_clang=1'])
      GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-clang-x64',
                          ['use_nacl_clang=1'])
      GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-clang-arm',
                          ['use_nacl_clang=1'])
      # NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
      for arch in ('ia32', 'arm'):
        # Fill in the latest native pnacl shim library from the chrome build.
        build_dir = GYPBUILD_DIR + '-pnacl-' + arch
        GypNinjaBuild_Pnacl(build_dir, arch)
  GypNinjaInstall(pepperdir, toolchains)
  # Per-toolchain install locations inside the pepper directory.
  platform = getos.GetPlatform()
  newlibdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_newlib')
  glibcdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_glibc')
  armdir = os.path.join(pepperdir, 'toolchain', platform + '_arm_newlib')
  pnacldir = os.path.join(pepperdir, 'toolchain', platform + '_pnacl')
  bionicdir = os.path.join(pepperdir, 'toolchain', platform + '_arm_bionic')
  if 'newlib' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('newlib', newlibdir, 'x86'),
                       'newlib')
  if 'glibc' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('glibc', glibcdir, 'x86'),
                       'glibc')
  if 'arm' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('newlib', armdir, 'arm'),
                       'arm')
  if 'bionic' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('bionic', bionicdir, 'arm'),
                       'bionic')
  if 'pnacl' in toolchains:
    # NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
    for arch in ('ia32', 'arm'):
      # Fill in the latest native pnacl shim library from the chrome build.
      build_dir = GYPBUILD_DIR + '-pnacl-' + arch
      if arch == 'ia32':
        nacl_arches = ['x86-32', 'x86-64']
      elif arch == 'arm':
        nacl_arches = ['arm']
      else:
        buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
      for nacl_arch in nacl_arches:
        release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
                                         'gen', 'tc_pnacl_translate',
                                         'lib-' + nacl_arch)
        pnacl_translator_lib_dir = GetPNaClTranslatorLib(pnacldir, nacl_arch)
        if not os.path.isdir(pnacl_translator_lib_dir):
          buildbot_common.ErrorExit('Expected %s directory to exist.' %
                                    pnacl_translator_lib_dir)
        buildbot_common.CopyFile(
            os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
            pnacl_translator_lib_dir)
    InstallNaClHeaders(GetToolchainNaClInclude('pnacl', pnacldir, 'pnacl'),
                       'pnacl')
    InstallNaClHeaders(GetToolchainNaClInclude('pnacl', pnacldir, 'x86'),
                       'pnacl')
    InstallNaClHeaders(GetToolchainNaClInclude('pnacl', pnacldir, 'arm'),
                       'pnacl')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
  """Ensure |pepperdir|/|dirname| exists, wiping it first when |clobber|.

  Returns the full path to the (now existing) directory.
  """
  target = os.path.join(pepperdir, dirname)
  if clobber:
    buildbot_common.RemoveDir(target)
  buildbot_common.MakeDir(target)
  return target
def BuildStepUpdateHelpers(pepperdir, clobber):
  """Refresh the shared project-helper files inside the SDK tree."""
  buildbot_common.BuildStep('Update project helpers')
  build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
                                build_experimental, clobber):
  """Copy examples, tutorials and libraries into the SDK tree."""
  buildbot_common.BuildStep('Update examples and libraries')

  filters = {}
  if not build_experimental:
    filters['EXPERIMENTAL'] = False

  if toolchains:
    # Work on a copy; callers keep their own list.
    toolchains = toolchains[:]
    # Neither 'arm' nor 'host' is a valid toolchain for build_projects;
    # 'host' is replaced by the concrete platform name.
    for excluded in ('arm', 'host'):
      if excluded in toolchains:
        toolchains.remove(excluded)
        if excluded == 'host':
          toolchains.append(getos.GetPlatform())
    filters['TOOLS'] = toolchains

  # Update examples and libraries
  filters['DEST'] = [
    'getting_started',
    'examples/api',
    'examples/demo',
    'examples/tutorial',
    'src'
  ]

  tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
  build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
                                toolchains=toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
                     deps=True, clean=False, config='Debug', args=None):
  """Run a named build step that makes every project under |directory|."""
  buildbot_common.BuildStep(step_name)
  build_projects.BuildProjectsBranch(pepperdir, directory, clean,
                                     deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Debug',
clean=True, config='Debug')
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Release',
clean=True, config='Release')
# Cleanup .pyc file generated while building libraries. Without
# this we would end up shipping the pyc in the SDK tarball.
buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
  """Collect license files under |fileroot| into a single NOTICE file.

  Args:
    fileroot: directory tree to scan for LICENSE/COPYING/COPYRIGHT files.
    output_filename: output path; relative paths are rooted at |fileroot|.
    extra_files: optional iterable of additional paths (relative to
        |fileroot|) to include even though they don't match the pattern.
  """
  # Look for LICENSE files (prefix match, e.g. LICENSE.txt also qualifies).
  license_filenames_re = re.compile(r'LICENSE|COPYING|COPYRIGHT')
  license_files = []
  for root, _, files in os.walk(fileroot):
    for filename in files:
      if license_filenames_re.match(filename):
        license_files.append(os.path.join(root, filename))
  if extra_files:
    license_files += [os.path.join(fileroot, f) for f in extra_files]
  # print(...) instead of the py2-only print statement; same output.
  print('\n'.join(license_files))
  if not os.path.isabs(output_filename):
    output_filename = os.path.join(fileroot, output_filename)
  generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
  """Check the assembled SDK tree against the canonical sdk_files.list.

  Exits the build with an actionable message if the list is malformed or
  the SDK contents don't match it.
  """
  buildbot_common.BuildStep('Verify SDK Files')
  file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
  try:
    print('SDK directory: %s' % pepperdir)
    verify_filelist.Verify(file_list_path, pepperdir)
    print('OK')
  # 'except X as e' replaces the py2-only 'except X, e' form; it is valid
  # on python 2.6+ as well, so behavior is unchanged.
  except verify_filelist.ParseException as e:
    buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
  except verify_filelist.VerifyException as e:
    # Build a hint showing exactly how to reproduce the failure locally.
    file_list_rel = os.path.relpath(file_list_path)
    verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
    verify_filelist_py = os.path.relpath(verify_filelist_py)
    pepperdir_rel = os.path.relpath(pepperdir)
    msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
               pepperdir_rel)
    buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
  """Create the pepper_NN .tar.bz2 bundle using the cygwin-safe tar script."""
  buildbot_common.BuildStep('Tar Pepper Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  buildbot_common.Run([sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
                       'pepper_' + pepper_ver], cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
                      archive_url):
  """Describe |tarfile| as a manifest_util.Bundle for the SDK manifest."""
  # Hash and size the archive directly from the stream.
  with open(tarfile, 'rb') as stream:
    sha1, size = manifest_util.DownloadAndComputeHash(stream)

  archive = manifest_util.Archive(manifest_util.GetHostOS())
  archive.url = archive_url
  archive.size = size
  archive.checksum = sha1

  bundle_name = 'pepper_' + pepper_ver
  bundle = manifest_util.Bundle(bundle_name)
  bundle.revision = int(chrome_revision)
  bundle.repath = bundle_name
  bundle.version = int(pepper_ver)
  bundle.description = (
      'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
          pepper_ver, chrome_revision, nacl_revision))
  bundle.stability = 'dev'
  bundle.recommended = 'no'
  bundle.archives = [archive]
  return bundle
def Archive(filename, from_directory, step_link=True):
  """Upload |filename| to the Chrome-version-specific GCS bucket path."""
  # Real SDK builders publish to the production bucket; everything else
  # goes to the test bucket.
  subdir = ('nacl_sdk' if buildbot_common.IsSDKBuilder()
            else 'nacl_sdk_test')
  bucket_path = ('nativeclient-mirror/nacl/%s/' % subdir +
                 build_version.ChromeVersion())
  buildbot_common.Archive(filename, bucket_path, from_directory, step_link)
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
                           tarfile):
  """Upload |tarfile| plus a generated manifest snippet to cloud storage."""
  buildbot_common.BuildStep('Archive %s' % name)
  tarname = os.path.basename(tarfile)
  tarfile_dir = os.path.dirname(tarfile)
  Archive(tarname, tarfile_dir)
  # generate "manifest snippet" for this archive.
  archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
      build_version.ChromeVersion(), tarname)
  bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
                             tarfile, archive_url)
  # The snippet is written next to the tarball and uploaded alongside it.
  manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
  with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
    manifest_snippet_stream.write(bundle.GetDataAsString())
  Archive(tarname + '.json', OUT_DIR, step_link=False)
def BuildStepBuildPNaClComponent(version, revision):
  """Package the PNaCl translator as a Chrome component (.zip + version)."""
  # Sadly revision can go backwards for a given version since when a version
  # is built from master, revision will be a huge number (in the hundreds of
  # thousands). Once the branch happens the revision will reset to zero.
  # TODO(sbc): figure out how to compensate for this in some way such that
  # revisions always go forward for a given version.
  buildbot_common.BuildStep('PNaCl Component')
  # Version numbers must follow the format specified in:
  # https://developer.chrome.com/extensions/manifest/version
  # So ensure that rev_major/rev_minor don't overflow and ensure there
  # are no leading zeros.
  if len(revision) > 4:
    # Split the revision string into its last four digits (minor) and the
    # remaining leading digits (major); int() strips leading zeros.
    rev_minor = int(revision[-4:])
    rev_major = int(revision[:-4])
    version = "0.%s.%s.%s" % (version, rev_major, rev_minor)
  else:
    version = "0.%s.0.%s" % (version, revision)
  buildbot_common.Run(['./make_pnacl_component.sh',
                       'pnacl_multicrx_%s.zip' % revision,
                       version], cwd=SCRIPT_DIR)
def BuildStepArchivePNaClComponent(revision):
  """Upload the PNaCl component zip built by BuildStepBuildPNaClComponent."""
  buildbot_common.BuildStep('Archive PNaCl Component')
  Archive('pnacl_multicrx_%s.zip' % revision, OUT_DIR)
def BuildStepArchiveSDKTools():
  """Build the SDK updater/installer packages and upload them."""
  buildbot_common.BuildStep('Build SDK Tools')
  build_updater.BuildUpdater(OUT_DIR)
  buildbot_common.BuildStep('Archive SDK Tools')
  Archive('sdk_tools.tgz', OUT_DIR, step_link=False)
  Archive('nacl_sdk.zip', OUT_DIR, step_link=False)
def BuildStepSyncNaClPorts():
  """Pull the pinned revision of naclports from SVN."""
  buildbot_common.BuildStep('Sync naclports')
  # In case a previous non-gclient checkout exists, remove it.
  # TODO(sbc): remove this once all the build machines
  # have removed the old checkout
  if (os.path.exists(NACLPORTS_DIR) and
      not os.path.exists(os.path.join(NACLPORTS_DIR, 'src'))):
    buildbot_common.RemoveDir(NACLPORTS_DIR)
  if not os.path.exists(NACLPORTS_DIR):
    # checkout new copy of naclports
    buildbot_common.MakeDir(NACLPORTS_DIR)
    cmd = ['gclient', 'config', '--name=src', NACLPORTS_URL]
    buildbot_common.Run(cmd, cwd=NACLPORTS_DIR)
  # sync to required revision
  cmd = ['gclient', 'sync', '-R', '-r', 'src@' + str(NACLPORTS_REV)]
  buildbot_common.Run(cmd, cwd=NACLPORTS_DIR)
def BuildStepBuildNaClPorts(pepper_ver, pepperdir):
  """Build selected naclports in all configurations."""
  # TODO(sbc): currently naclports doesn't know anything about
  # Debug builds so the Debug subfolders are all empty.
  # The naclports build script is driven entirely through the environment.
  env = dict(os.environ)
  env['NACL_SDK_ROOT'] = pepperdir
  env['PEPPER_DIR'] = os.path.basename(pepperdir)  # pepper_NN
  env['NACLPORTS_NO_ANNOTATE'] = "1"
  env['NACLPORTS_NO_UPLOAD'] = "1"
  env['BUILDBOT_GOT_REVISION'] = str(NACLPORTS_REV)
  build_script = 'build_tools/buildbot_sdk_bundle.sh'
  buildbot_common.BuildStep('Build naclports')
  naclports_src = os.path.join(NACLPORTS_DIR, 'src')
  bundle_dir = os.path.join(naclports_src, 'out', 'sdk_bundle')
  out_dir = os.path.join(bundle_dir, 'pepper_%s' % pepper_ver)
  # Remove the sdk_bundle directory to remove stale files from previous builds.
  buildbot_common.RemoveDir(bundle_dir)
  buildbot_common.Run([build_script], env=env, cwd=naclports_src)
  # Some naclports do not include a standalone LICENSE/COPYING file
  # so we explicitly list those here for inclusion.
  extra_licenses = ('tinyxml/readme.txt',
                    'jpeg-8d/README',
                    'zlib-1.2.3/README')
  src_root = os.path.join(naclports_src, 'out', 'build')
  output_license = os.path.join(out_dir, 'ports', 'LICENSE')
  GenerateNotice(src_root, output_license, extra_licenses)
  readme = os.path.join(out_dir, 'ports', 'README')
  oshelpers.Copy(['-v', os.path.join(SDK_SRC_DIR, 'README.naclports'), readme])
def BuildStepTarNaClPorts(pepper_ver, tarfile):
  """Create tar archive containing headers and libs from naclports build."""
  buildbot_common.BuildStep('Tar naclports Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  ports_out = os.path.join(NACLPORTS_DIR, 'src', 'out', 'sdk_bundle')
  # Only the ports/ subtree of the pepper bundle goes into the archive.
  pepper_ports_dir = os.path.join('pepper_%s' % pepper_ver, 'ports')
  buildbot_common.Run(
      [sys.executable, CYGTAR, '-C', ports_out, '-cjf', tarfile,
       pepper_ports_dir],
      cwd=NACL_DIR)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
  """Build the projects found in src/gonacl_appengine/src"""
  buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
  # The makefile both builds and uploads; it needs the SDK root to compile.
  cmd = ['make', 'upload', 'REVISION=%s' % chrome_revision]
  env = dict(os.environ)
  env['NACL_SDK_ROOT'] = pepperdir
  env['NACLPORTS_NO_ANNOTATE'] = "1"
  buildbot_common.Run(cmd, env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
  """Build the NaCl SDK: toolchains, libraries, examples, tars and archives.

  Returns 0 on success, 1 if the user aborts the bionic confirmation prompt.
  """
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--nacl-tree-path',
                      help='Path to native client tree for bionic build.',
                      dest='nacl_tree_path')
  parser.add_argument('--qemu', help='Add qemu for ARM.',
                      action='store_true')
  parser.add_argument('--bionic', help='Add bionic build.',
                      action='store_true')
  parser.add_argument('--tar', help='Force the tar step.',
                      action='store_true')
  parser.add_argument('--archive', help='Force the archive step.',
                      action='store_true')
  parser.add_argument('--release', help='PPAPI release version.',
                      dest='release', default=None)
  parser.add_argument('--build-ports',
                      help='Build naclport bundle.', action='store_true')
  parser.add_argument('--build-app-engine',
                      help='Build AppEngine demos.', action='store_true')
  parser.add_argument('--experimental',
                      help='build experimental examples and libraries',
                      action='store_true',
                      dest='build_experimental')
  parser.add_argument('--skip-toolchain', help='Skip toolchain untar',
                      action='store_true')
  parser.add_argument('--no-clean', dest='clean', action='store_false',
                      help="Don't clean gypbuild directories")
  parser.add_argument('--mac-sdk',
      help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
  parser.add_argument('--no-arm-trusted', action='store_true',
      help='Disable building of ARM trusted components (sel_ldr, etc).')
  parser.add_argument('--no-use-sysroot', action='store_true',
      help='Disable building against sysroot.')
  # To setup bash completion for this command first install optcomplete
  # and then add this line to your .bashrc:
  #  complete -F _optcomplete build_sdk.py
  try:
    import optcomplete
    optcomplete.autocomplete(parser)
  except ImportError:
    pass
  # Options are published as a module global; the GypNinjaBuild helpers
  # read them directly.
  global options
  options = parser.parse_args(args)
  buildbot_common.BuildStep('build_sdk')
  if options.nacl_tree_path:
    # Building bionic straight out of a local NaCl checkout is an
    # interactive, developer-only path; warn and ask for confirmation.
    options.bionic = True
    toolchain_build = os.path.join(options.nacl_tree_path, 'toolchain_build')
    print 'WARNING: Building bionic toolchain from NaCl checkout.'
    print 'This option builds bionic from the sources currently in the'
    print 'provided NativeClient checkout, and the results instead of '
    print 'downloading a toolchain from the builder. This may result in a'
    print 'NaCl SDK that can not run on ToT chrome.'
    print 'NOTE: To clobber you will need to run toolchain_build_bionic.py'
    print 'directly from the NativeClient checkout.'
    print ''
    response = raw_input("Type 'y' and hit enter to continue.\n")
    if response != 'y' and response != 'Y':
      print 'Aborting.'
      return 1
    # Get head version of NativeClient tree
    buildbot_common.BuildStep('Build bionic toolchain.')
    buildbot_common.Run([sys.executable, 'toolchain_build_bionic.py', '-f'],
                        cwd=toolchain_build)
  else:
    toolchain_build = None
  if buildbot_common.IsSDKBuilder():
    # Official builders always archive, build ports, and tar.
    options.archive = True
    options.build_ports = True
    # TODO(binji): re-enable app_engine build when the linux builder stops
    # breaking when trying to git clone from github.
    # See http://crbug.com/412969.
    options.build_app_engine = False
    options.tar = True
  # NOTE: order matters here. This will be the order that is specified in the
  # Makefiles; the first toolchain will be the default.
  toolchains = ['pnacl', 'newlib', 'glibc', 'arm', 'clang-newlib', 'host']
  # Changes for experimental bionic builder
  if options.bionic:
    toolchains.append('bionic')
    options.build_ports = False
    options.build_app_engine = False
  print 'Building: ' + ' '.join(toolchains)
  platform = getos.GetPlatform()
  if options.archive and not options.tar:
    parser.error('Incompatible arguments with archive.')
  chrome_version = int(build_version.ChromeMajorVersion())
  chrome_revision = build_version.ChromeRevision()
  nacl_revision = build_version.NaClRevision()
  pepper_ver = str(chrome_version)
  pepper_old = str(chrome_version - 1)
  pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
  pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
  if options.bionic:
    tarname = 'naclsdk_bionic.tar.bz2'
  else:
    tarname = 'naclsdk_%s.tar.bz2' % platform
  tarfile = os.path.join(OUT_DIR, tarname)
  if options.release:
    pepper_ver = options.release
  print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)
  if 'NACL_SDK_ROOT' in os.environ:
    # We don't want the currently configured NACL_SDK_ROOT to have any effect
    # of the build.
    del os.environ['NACL_SDK_ROOT']
  if platform == 'linux':
    # Linux-only: make sure the debian/stable sysroot image is installed
    install_script = os.path.join(SRC_DIR, 'build', 'linux', 'sysroot_scripts',
                                  'install-sysroot.py')
    buildbot_common.Run([sys.executable, install_script, '--arch=arm'])
    buildbot_common.Run([sys.executable, install_script, '--arch=i386'])
    buildbot_common.Run([sys.executable, install_script, '--arch=amd64'])
  if not options.skip_toolchain:
    BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
    BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
    BuildStepDownloadToolchains(toolchains)
    if options.nacl_tree_path:
      # Instead of untarring, copy the raw bionic toolchain
      not_bionic = [i for i in toolchains if i != 'bionic']
      BuildStepUntarToolchains(pepperdir, not_bionic)
      tcname = GetToolchainDirName('bionic', 'arm')
      srcdir = os.path.join(toolchain_build, 'out', tcname)
      bionicdir = os.path.join(pepperdir, 'toolchain', tcname)
      oshelpers.Copy(['-r', srcdir, bionicdir])
    else:
      BuildStepUntarToolchains(pepperdir, toolchains)
    if platform == 'linux':
      buildbot_common.Move(os.path.join(pepperdir, 'toolchain', 'arm_trusted'),
                           os.path.join(OUT_DIR, 'arm_trusted'))
  if platform == 'linux':
    # Linux-only: Copy arm libraries from the arm_trusted package. These are
    # needed to be able to run sel_ldr_arm under qemu.
    arm_libs = [
      'lib/arm-linux-gnueabihf/librt.so.1',
      'lib/arm-linux-gnueabihf/libpthread.so.0',
      'lib/arm-linux-gnueabihf/libgcc_s.so.1',
      'lib/arm-linux-gnueabihf/libc.so.6',
      'lib/arm-linux-gnueabihf/ld-linux-armhf.so.3',
      'lib/arm-linux-gnueabihf/libm.so.6',
      'usr/lib/arm-linux-gnueabihf/libstdc++.so.6'
    ]
    arm_lib_dir = os.path.join(pepperdir, 'tools', 'lib', 'arm_trusted', 'lib')
    buildbot_common.MakeDir(arm_lib_dir)
    for arm_lib in arm_libs:
      arm_lib = os.path.join(OUT_DIR, 'arm_trusted', arm_lib)
      buildbot_common.CopyFile(arm_lib, arm_lib_dir)
    buildbot_common.CopyFile(os.path.join(OUT_DIR, 'arm_trusted', 'qemu-arm'),
                             os.path.join(pepperdir, 'tools'))
  BuildStepBuildToolchains(pepperdir, toolchains,
                           not options.skip_toolchain,
                           options.clean)
  BuildStepUpdateHelpers(pepperdir, True)
  BuildStepUpdateUserProjects(pepperdir, toolchains,
                              options.build_experimental, True)
  BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)
  # Ship with libraries prebuilt, so run that first.
  BuildStepBuildLibraries(pepperdir, 'src')
  GenerateNotice(pepperdir)
  # Verify the SDK contains what we expect.
  if not options.bionic:
    BuildStepVerifyFilelist(pepperdir)
  if options.tar:
    BuildStepTarBundle(pepper_ver, tarfile)
  if platform == 'linux':
    BuildStepBuildPNaClComponent(pepper_ver, chrome_revision)
  if options.build_ports:
    ports_tarfile = os.path.join(OUT_DIR, 'naclports.tar.bz2')
    BuildStepSyncNaClPorts()
    BuildStepBuildNaClPorts(pepper_ver, pepperdir)
    if options.tar:
      BuildStepTarNaClPorts(pepper_ver, ports_tarfile)
  if options.build_app_engine and platform == 'linux':
    BuildStepBuildAppEngine(pepperdir, chrome_revision)
  if options.qemu:
    qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
    oshelpers.Copy(['-r', qemudir, pepperdir])
  # Archive the results on Google Cloud Storage.
  if options.archive:
    BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
                           tarfile)
    # Only archive sdk_tools/naclport/pnacl_component on linux.
    if platform == 'linux':
      if options.build_ports:
        BuildStepArchiveBundle('naclports', pepper_ver, chrome_revision,
                               nacl_revision, ports_tarfile)
      BuildStepArchiveSDKTools()
      BuildStepArchivePNaClComponent(chrome_revision)
  return 0
if __name__ == '__main__':
  # Allow a clean ctrl-C abort without a traceback on the bots.
  try:
    sys.exit(main(sys.argv[1:]))
  except KeyboardInterrupt:
    buildbot_common.ErrorExit('build_sdk: interrupted')
| |
# -*- coding: utf-8 -*-
from flask import request, redirect, render_template, url_for, json, flash, Flask
import datetime
import forms
import arduino
app = Flask(__name__)
# --- Module-level device state ---------------------------------------------
# Each flag mirrors the last commanded state of one light/appliance/curtain
# (True = on/up, False = off/down).
# NOTE(review): state lives in module globals, shared across all requests;
# this is not thread-safe -- confirm the app is run single-threaded.
# Lights
estadoLuzHabitacion1=False
estadoLuzHabitacion2=False
estadoLuzHabitacion3=False
estadoLuzCocina=False
estadoLuzComedor=False
estadoLuzLiving=False
estadoLuzLiving2=False
estadoLuzQuincho=False
estadoLuzQuincho2=False
estadoLuzPileta=False
estadoLuzJardin=False
estadoLuzPatio=False
estadoLuzGeneral=False
# Other appliances
estadoTelevisionGeneral=False
# Curtains
estadoCortinaHabitacion1=False
estadoCortinaHabitacion2=False
estadoCortinaHabitacion3=False
@app.route('/', methods=['GET' , 'POST'])
@app.route('/Inicio', methods=['GET' , 'POST'])
def Inicio():
    """Main page: apply on/off commands to the general and common-area lights.

    Reads the submitted electro form, updates the module-level state flags,
    forwards each change to the arduino, and re-renders the index page with
    the current states.
    """
    electroForm = forms.electro(request.form)

    def aplicar_luz(comando, estado_actual, lugar):
        # Translate an "on"/"off" form command into a new state plus an
        # arduino notification; any other value leaves the state untouched.
        if comando == "on":
            arduino.send("luz", True, lugar)
            return True
        elif comando == "off":
            arduino.send("luz", False, lugar)
            return False
        return estado_actual

    global estadoLuzGeneral, estadoLuzPileta, estadoLuzPatio, estadoLuzJardin
    global estadoLuzCocina, estadoLuzComedor, estadoLuzLiving, estadoLuzLiving2
    global estadoLuzQuincho, estadoLuzQuincho2
    # Same order as the original per-light if/elif blocks.
    estadoLuzGeneral = aplicar_luz(electroForm.luz.data,
                                   estadoLuzGeneral, "General")
    estadoLuzPileta = aplicar_luz(electroForm.luzPileta.data,
                                  estadoLuzPileta, "Pileta")
    estadoLuzPatio = aplicar_luz(electroForm.luzPatio.data,
                                 estadoLuzPatio, "Patio")
    estadoLuzJardin = aplicar_luz(electroForm.luzJardin.data,
                                  estadoLuzJardin, "Jardin")
    estadoLuzCocina = aplicar_luz(electroForm.luzCocina.data,
                                  estadoLuzCocina, "Cocina")
    estadoLuzComedor = aplicar_luz(electroForm.luzComedor.data,
                                   estadoLuzComedor, "Comedor")
    estadoLuzLiving = aplicar_luz(electroForm.luzLiving.data,
                                  estadoLuzLiving, "Living")
    estadoLuzLiving2 = aplicar_luz(electroForm.luzLiving2.data,
                                   estadoLuzLiving2, "Living2")
    estadoLuzQuincho = aplicar_luz(electroForm.luzQuincho.data,
                                   estadoLuzQuincho, "Quincho")
    estadoLuzQuincho2 = aplicar_luz(electroForm.luzQuincho2.data,
                                    estadoLuzQuincho2, "Quincho2")
    return render_template('index.html', estadoLuz=estadoLuzGeneral,
                           estadoLuzPatio=estadoLuzPatio,
                           estadoLuzJardin=estadoLuzJardin,
                           estadoLuzPileta=estadoLuzPileta,
                           estadoLuzCocina=estadoLuzCocina,
                           estadoLuzComedor=estadoLuzComedor,
                           estadoLuzLiving=estadoLuzLiving,
                           estadoLuzLiving2=estadoLuzLiving2,
                           estadoLuzQuincho=estadoLuzQuincho,
                           estadoLuzQuincho2=estadoLuzQuincho2)
@app.route('/Habitacion1', methods=['GET' , 'POST'])
def Habitacion1():
    """Room 1 page: light on/off and curtain up/down control."""
    global estadoLuzHabitacion1, estadoCortinaHabitacion1
    form = forms.electro(request.form)
    luz = form.luz.data
    cortina = form.cortina.data
    # Light: "on"/"off"; anything else leaves the state unchanged.
    if luz == "on":
        estadoLuzHabitacion1 = True
        arduino.send("luz", True, "Habitacion1")
    elif luz == "off":
        estadoLuzHabitacion1 = False
        arduino.send("luz", False, "Habitacion1")
    # Curtain: "up"/"down"; anything else leaves the state unchanged.
    if cortina == "up":
        estadoCortinaHabitacion1 = True
        arduino.send("cortina", True, "Habitacion1")
    elif cortina == "down":
        estadoCortinaHabitacion1 = False
        arduino.send("cortina", False, "Habitacion1")
    return render_template('habitacion1.html', estadoLuz=estadoLuzHabitacion1,
                           estadoCortina=estadoCortinaHabitacion1)
@app.route('/Habitacion2', methods=['GET' , 'POST'])
def Habitacion2():
    """Room 2 page: light on/off and curtain up/down control."""
    global estadoLuzHabitacion2, estadoCortinaHabitacion2
    form = forms.electro(request.form)
    luz = form.luz.data
    cortina = form.cortina.data
    # Light: "on"/"off"; anything else leaves the state unchanged.
    if luz == "on":
        estadoLuzHabitacion2 = True
        arduino.send("luz", True, "Habitacion2")
    elif luz == "off":
        estadoLuzHabitacion2 = False
        arduino.send("luz", False, "Habitacion2")
    # Curtain: "up"/"down"; anything else leaves the state unchanged.
    if cortina == "up":
        estadoCortinaHabitacion2 = True
        arduino.send("cortina", True, "Habitacion2")
    elif cortina == "down":
        estadoCortinaHabitacion2 = False
        arduino.send("cortina", False, "Habitacion2")
    return render_template('habitacion2.html', estadoLuz=estadoLuzHabitacion2,
                           estadoCortina=estadoCortinaHabitacion2)
@app.route('/Habitacion3', methods=['GET' , 'POST'])
def Habitacion3():
    """Room 3 page: light on/off and curtain up/down control."""
    global estadoLuzHabitacion3, estadoCortinaHabitacion3
    form = forms.electro(request.form)
    luz = form.luz.data
    cortina = form.cortina.data
    # Light: "on"/"off"; anything else leaves the state unchanged.
    if luz == "on":
        estadoLuzHabitacion3 = True
        arduino.send("luz", True, "Habitacion3")
    elif luz == "off":
        estadoLuzHabitacion3 = False
        arduino.send("luz", False, "Habitacion3")
    # Curtain: "up"/"down"; anything else leaves the state unchanged.
    if cortina == "up":
        estadoCortinaHabitacion3 = True
        arduino.send("cortina", True, "Habitacion3")
    elif cortina == "down":
        estadoCortinaHabitacion3 = False
        arduino.send("cortina", False, "Habitacion3")
    return render_template('habitacion3.html', estadoLuz=estadoLuzHabitacion3,
                           estadoCortina=estadoCortinaHabitacion3)
@app.route('/Manager', methods=['GET' , 'POST'])
def Manager():
    """Render the manager page (static template; no device state touched)."""
    return render_template('manager.html')
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug interactive debugger and
    # reloader; combined with a LAN-reachable bind address on port 80 this
    # can allow remote code execution. Disable debug outside development.
    app.run(host='192.168.0.8', port=80, debug=True)
| |
# -*- coding: utf-8 -*-
# Copyright 2017 DST Controls
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
osisoftpy.tests.test_webapi.py
~~~~~~~~~~~~
Tests for the `osisoftpy.webapi` module.
"""
import re
import osisoftpy
import pytest
import requests
import random
import time
from datetime import datetime, timedelta
from dateutil import parser
from .conftest import query
# https://techsupport.osisoft.com/Troubleshooting/Known-Issues/176830
# Flag for a known PI server issue (link above); presumably consulted by
# tests later in this module to skip/adjust affected cases -- confirm usage.
piserverissue = True
def test_get_webapi_object(webapi):
    # The webapi fixture should produce an osisoftpy.WebAPI instance.
    assert isinstance(webapi, osisoftpy.WebAPI)
def test_webapi_has_session(webapi):
    """The api object should hold a requests.Session for connection reuse."""
    # Dump the object's attributes to aid debugging on failure.
    attrs = vars(webapi)
    print(', '.join('%s: %s' % pair for pair in attrs.items()))
    assert isinstance(webapi.session, requests.Session)
def test_webapi_has_links(webapi):
    """The api object should expose its HATEOAS links as a dict."""
    # Dump the object's attributes to aid debugging on failure.
    attrs = vars(webapi)
    print(', '.join('%s: %s' % pair for pair in attrs.items()))
    assert isinstance(webapi.links, dict)
def test_webapi_has_str_(webapi, url):
    """str() of the api object should include the base URL."""
    # str() invokes __str__; prefer the builtin over calling the dunder.
    assert str(webapi) == '<OSIsoft PI Web API [{}]>'.format(url + '/')
def test_webapi_has_self_url(webapi, url):
    # The server-reported 'Self' link should match the configured base URL.
    assert webapi.links.get('Self') == url + '/'
def test_webapi_has_self_url_property(webapi, url):
    # The url property should mirror the 'Self' link.
    assert webapi.url == url + '/'
def test_webapi_has_search_url(webapi, url):
    # The 'Search' link should point at the /search endpoint.
    assert webapi.links.get('Search') == url + '/search'
def test_webapi_has_dataservers(webapi):
    """The test server is expected to expose exactly two data servers."""
    # len() is the idiomatic spelling of the dunder __len__() call.
    assert len(webapi.dataservers) == 2
def test_webapi_query_sinusoid(webapi):
    """A name search for 'sinusoid' should return at least one hit."""
    tag = 'sinusoid'
    payload = dict(query="name:{}".format(tag), count=10)
    r = webapi.request(**payload)
    assert r.status_code == requests.codes.ok
    # Parse the response once instead of re-decoding it per assertion.
    body = r.json()
    assert body.get('TotalHits') > 0
    name = body.get('Items')[0].get('Name')
    assert name.lower() == 'sinusoid'
    assert bool(re.match(name, tag, re.IGNORECASE))
def test_webapi_points_sinusoid(webapi):
    """points() with an exact-name query should yield exactly one Point."""
    tag = 'sinusoid'
    payload = dict(query="name:{}".format(tag), count=10, scope='pi:gold')
    r = webapi.points(**payload)
    assert all(isinstance(x, osisoftpy.Point) for x in r)
    # len() is the idiomatic spelling of the dunder __len__() call.
    assert len(r) == 1
@pytest.mark.parametrize('query', query())
def test_webapi_points_query(webapi, query):
    """Every result of an arbitrary query should be an osisoftpy.Point."""
    payload = dict(query=query, count=1000)
    points = webapi.points(**payload)
    assert all(isinstance(x, osisoftpy.Point) for x in points)
    # len() is the idiomatic spelling of the dunder __len__() call.
    msg = '{} points were retrieved with the query "{}"'
    print(msg.format(len(points), query))
def test_webapi_points_scope(webapi):
    """Scoped wildcard query against pi:gold should return four points."""
    points = webapi.points(query='name:SINUSOID*', scope='pi:gold')
    # len() is the idiomatic spelling of the dunder __len__() call.
    assert len(points) == 4
#TODO: Fix this test; temp fix in place
def test_webapi_points_pagination(webapi):
    """points() should paginate through all results of a broad query."""
    points = webapi.points(query='name:S*')
    # len() is the idiomatic spelling of the dunder __len__() call.
    # NOTE(review): 578 is a snapshot of the test server's contents and
    # will drift as points are added/removed -- hence the TODO above.
    assert len(points) == 578
# Subscription tests
# a list to store modified points in:
# (shared, module-level accumulator; tests must reset it before use)
updated_points = []
def callback(sender):
    # Record every point whose subscription fired.
    updated_points.append(sender)
# test getvalue
@pytest.mark.skipif(True, reason='Method only used for internal testing')
@pytest.mark.parametrize('query', ['name:PythonInserted_appveyor*'])
@pytest.mark.parametrize('stream', ['getvalue'])
def test_subscription_getvalue(webapi, query, stream, callback=callback):
    """Subscribing to 'getvalue' should invoke the callback on fetches."""
    # Reset the shared accumulator left over from earlier tests.
    updated_points[:] = []
    points = webapi.points(query=query)
    subscriptions = webapi.subscribe(points, stream, callback=callback)
    for point in points:
        # Values are fetched only for their subscription side effect;
        # the returned values themselves are not asserted on.
        v1 = point.getvalue("5-16-2017 07:00")
        v2 = point.getvalue("5-17-2017 07:00")
    assert len(updated_points) > 0
    subscriptions = webapi.unsubscribe(points, stream)
# accumulator for the 'current' stream subscription tests
updated_points_current = []


def callback_current(sender):
    """Record each point reported changed by the 'current' stream."""
    updated_points_current.append(sender)
# test current_value
@pytest.mark.parametrize('query', ['name:PythonInserted_appveyor'])
@pytest.mark.parametrize('stream', ['current'])
def test_subscription_current(webapi, query, stream, callback=callback_current):
    """Writing a new snapshot value should notify the 'current' stream
    subscription exactly once for the single matched point."""
    #clear array from previous tests
    updated_points_current[:] = []
    points = webapi.points(query=query)
    subscriptions = webapi.subscribe(points, stream, callback=callback_current)
    for point in points:
        v1 = point.current()
        # write a random value at '*' (now); the sleep gives the server
        # time to store it before re-reading
        point.update_values(["*"], [random.uniform(0,100)])
        time.sleep(0.5)
        v2 = point.current()
    assert len(updated_points_current) == 1 # one point updated
    subscriptions = webapi.unsubscribe(points, stream)
# accumulator for the 'end' stream subscription tests
updated_points_end = []


def callback_end(sender):
    """Record each point reported changed by the 'end' stream."""
    updated_points_end.append(sender)
# test end_value
@pytest.mark.parametrize('query', ['name:PythonInserted_travis'])
@pytest.mark.parametrize('stream', ['end'])
def test_subscription_end(webapi, query, stream, callback=callback_end):
    """Appending a later archived value should notify the 'end'
    (most-recent-value) stream subscription exactly once."""
    #clear array from previous tests
    updated_points_end[:] = []
    points = webapi.points(query=query)
    subscriptions = webapi.subscribe(points, stream, callback=callback_end)
    for point in points:
        point.update_values(["*"], [random.uniform(0,100)])
        time.sleep(0.5)
        v1 = point.end()
        # write one minute after 'now' so the end-of-stream value moves
        point.update_values(["*+1m"], [random.uniform(0,100)])
        time.sleep(0.5)
        v2 = point.end()
    assert len(updated_points_end) == 1
    subscriptions = webapi.unsubscribe(points, stream)
# accumulator for the single-timestamp interpolatedattimes test
updated_points_interp_1 = []


def callback_interp_1(sender):
    """Record each point whose interpolated value changed."""
    updated_points_interp_1.append(sender)
# test interpolatedattimes - assumes no one has used this tag
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['name:PythonInterpolatedAtTime'])
# @pytest.mark.parametrize('times', [['2017-01-01T00:00:00Z']])
def test_subscription_interpolatedattimes_single_timestamp_notify_one(webapi, query, now, ci, pythonversion, callback=callback_interp_1):
    """Changing an archived value that brackets a subscribed timestamp
    should produce exactly one interpolated-value notification."""
    #clear array from previous tests
    updated_points_interp_1[:] = []
    # one timestamp, a week (168h) in the past, formatted for the PI Web API
    times = [now.shift(hours=-168).format('YYYY-MM-DD HH:mm:ss ZZ')]
    #query points (should be 1)
    points = webapi.points(query='{}_{}{}'.format(query, ci, pythonversion))
    for point in points:
        for t in times:
            #subscriber each timestamp for this point
            webapi.subscribe(points, 'interpolatedattimes', startdatetime=t, callback=callback_interp_1)
            #setup with values here: insert a value 1 day before and after timestamp: 0 to 1000
            #datetime is parsed so days can added/subtracted
            parseddatetime = parser.parse(t)
            date1 = (parseddatetime + timedelta(minutes=-1)).strftime('%Y-%m-%dT%H:%M:%SZ')
            point.update_value(date1, 0)
            time.sleep(0.5)
            date2 = (parseddatetime + timedelta(minutes=1)).strftime('%Y-%m-%dT%H:%M:%SZ')
            point.update_value(date2, 1000)
            time.sleep(0.5)
            #gets initial value for subscriber
            point.interpolatedattimes([t])
            #updates after value to 500, so there should be a new interpolated value
            point.update_value(date1, 500)
            time.sleep(0.5)
            #queries new point and should trigger callback function
            point.interpolatedattimes([t])
    assert len(updated_points_interp_1) == 1
    webapi.unsubscribe(points, 'interpolatedattimes')
# accumulator for the two-timestamp interpolatedattimes test
updated_points_interp_2 = []


def callback_interp_2(sender):
    """Record each point whose interpolated value changed."""
    updated_points_interp_2.append(sender)
# test interpolatedattimes - assumes no one has used this tag
@pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['name:PythonInterpolatedAtTime'])
# @pytest.mark.parametrize('times', [['2016-05-01T00:00:00Z','2016-06-01T00:00:00Z']])
def test_subscription_interpolatedattimes_single_timestamp_notify_two(webapi, query, now, ci, pythonversion, callback=callback_interp_2):
    """With two subscribed timestamps, changing a bracketing archived
    value for each should produce exactly two notifications."""
    #clear array from previous tests
    updated_points_interp_2[:] = []
    # two timestamps: 48h and 96h in the past
    times = [now.shift(hours=-48).format('YYYY-MM-DD HH:mm:ss ZZ'), now.shift(hours=-96).format('YYYY-MM-DD HH:mm:ss ZZ')]
    #query points (should be 1)
    points = webapi.points(query='{}_{}{}'.format(query, ci, pythonversion))
    for point in points:
        for t in times:
            #subscriber each timestamp for this point
            webapi.subscribe(points, 'interpolatedattimes', startdatetime=t, callback=callback_interp_2)
            #setup with values here: insert a value 1 day before and after timestamp: 0 to 1000
            #datetime is parsed so days can added/subtracted
            parseddatetime = parser.parse(t)
            date1 = (parseddatetime + timedelta(minutes=-1)).strftime('%Y-%m-%dT%H:%M:%SZ')
            point.update_value(date1, 0)
            time.sleep(0.5)
            date2 = (parseddatetime + timedelta(minutes=1)).strftime('%Y-%m-%dT%H:%M:%SZ')
            point.update_value(date2, 1000)
            time.sleep(0.5)
        #gets initial values for subscriber
        point.interpolatedattimes(times)
    #queries new value and should trigger callback function
    for point in points:
        for t in times:
            #updates after value to 500, so there should be a new interpolated value
            parseddatetime = parser.parse(t)
            date2 = (parseddatetime + timedelta(minutes=1)).strftime('%Y-%m-%dT%H:%M:%SZ')
            point.update_value(date2, 500)
            time.sleep(0.5)
        point.interpolatedattimes(times)
    assert updated_points_interp_2.__len__() == 2
    webapi.unsubscribe(points, 'interpolatedattimes')
# accumulator for the recordedattime subscription test
updated_points_recorded = []


def callback_recorded(sender):
    """Record each point whose recorded value changed."""
    updated_points_recorded.append(sender)
# test recordedattimes - assumes no one has used this tag
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['name:PythonRecordedAtTime'])
# @pytest.mark.parametrize('time', ['2017-01-01T00:00:00Z','2017-01-02T00:00:00Z'])
def test_subscription_recordedattimes(webapi, query, now, ci, pythonversion, callback=callback_recorded):
    """Rewriting the archived value at a subscribed timestamp should
    produce exactly one recorded-value notification."""
    #clear array from previous test
    updated_points_recorded[:] = []
    # a single timestamp 26 hours in the past
    t = now.shift(hours=-26).format('YYYY-MM-DD HH:mm:ss ZZ')
    #query points (should be 1)
    points = webapi.points(query='{}_{}{}'.format(query, ci, pythonversion))
    for point in points:
        webapi.subscribe(points, 'recordedattime', startdatetime=t, callback=callback_recorded)
        # parseddatetime = parser.parse(time)
        # date = (parseddatetime + timedelta(days=-1)).strftime('%Y-%m-%dT%H:%M:%SZ')
        point.update_value(t, 134)
        time.sleep(0.5)
        point.recordedattime(t)
        # overwrite the same timestamp with a different value
        point.update_value(t, 160)
        time.sleep(0.5)
        #should trigger callback function
        point.recordedattime(t)
    assert len(updated_points_recorded) == 1
    webapi.unsubscribe(points, 'recordedattime')
# AF Tests
def test_webapi_has_assetservers_objects(webapi):
    """At least one asset server should be returned, all typed AssetServer."""
    assert all(isinstance(s, osisoftpy.AssetServer) for s in webapi.assetservers)
    assert len(webapi.assetservers) > 0
def test_webapi_has_assetdatabases(webapi):
    """The GOLD asset server should host more than two AF databases."""
    for assetserver in webapi.assetservers:
        print('AF Server: {0}'.format(assetserver.name))
        if assetserver.name == 'GOLD':
            databases = assetserver.get_databases()
            assert len(databases) > 2
| |
# Copyright Anne M. Archibald 2008
# Released under the scipy license
import numpy as np
from heapq import heappush, heappop
# import scipy.sparse ### removed method that uses this -rdh
def minkowski_distance_p(x, y, p=2):
    """Compute the pth power of the L**p distance between x and y.

    For efficiency, this function computes the L**p distance but does
    not extract the pth root. If p is 1 or infinity, this is equal to
    the actual L**p distance.
    """
    diff = np.abs(np.asarray(y) - np.asarray(x))
    if p == np.inf:
        return np.amax(diff, axis=-1)
    if p == 1:
        return np.sum(diff, axis=-1)
    return np.sum(diff ** p, axis=-1)


def minkowski_distance(x, y, p=2):
    """Compute the L**p distance between x and y."""
    dist_p = minkowski_distance_p(np.asarray(x), np.asarray(y), p)
    # for p in {1, inf} the pth-power distance already is the distance
    if p == np.inf or p == 1:
        return dist_p
    return dist_p ** (1. / p)
class Rectangle(object):
    """Hyperrectangle class.

    Represents a Cartesian product of intervals.
    """

    def __init__(self, maxes, mins):
        """Construct a hyperrectangle from two (possibly unordered) corners."""
        # np.float was removed in NumPy 1.24; the builtin float is the
        # same dtype (float64) and works on all NumPy versions.
        self.maxes = np.maximum(maxes, mins).astype(float)
        self.mins = np.minimum(maxes, mins).astype(float)
        self.m, = self.maxes.shape

    def __repr__(self):
        # materialize the pairs: under Python 3 a bare zip() would render
        # as "<zip object at 0x...>" instead of the interval list
        return "<Rectangle %s>" % list(zip(self.mins, self.maxes))

    def volume(self):
        """Total volume."""
        return np.prod(self.maxes - self.mins)

    def split(self, d, split):
        """Produce two hyperrectangles by splitting along axis d.

        In general, if you need to compute maximum and minimum
        distances to the children, it can be done more efficiently
        by updating the maximum and minimum distances to the parent.
        """  # FIXME: do this
        mid = np.copy(self.maxes)
        mid[d] = split
        less = Rectangle(self.mins, mid)
        mid = np.copy(self.mins)
        mid[d] = split
        greater = Rectangle(mid, self.maxes)
        return less, greater

    def min_distance_point(self, x, p=2.):
        """Compute the minimum distance between x and a point in the hyperrectangle."""
        return minkowski_distance(0, np.maximum(0, np.maximum(self.mins - x, x - self.maxes)), p)

    def max_distance_point(self, x, p=2.):
        """Compute the maximum distance between x and a point in the hyperrectangle."""
        return minkowski_distance(0, np.maximum(self.maxes - x, x - self.mins), p)

    def min_distance_rectangle(self, other, p=2.):
        """Compute the minimum distance between points in the two hyperrectangles."""
        return minkowski_distance(0, np.maximum(0, np.maximum(self.mins - other.maxes, other.mins - self.maxes)), p)

    def max_distance_rectangle(self, other, p=2.):
        """Compute the maximum distance between points in the two hyperrectangles."""
        return minkowski_distance(0, np.maximum(self.maxes - other.mins, other.maxes - self.mins), p)
class KDTree(object):
    """kd-tree for quick nearest-neighbor lookup

    This class provides an index into a set of k-dimensional points
    which can be used to rapidly look up the nearest neighbors of any
    point.

    The algorithm used is described in Maneewongvatana and Mount 1999.
    The general idea is that the kd-tree is a binary trie, each of whose
    nodes represents an axis-aligned hyperrectangle. Each node specifies
    an axis and splits the set of points based on whether their coordinate
    along that axis is greater than or less than a particular value.

    During construction, the axis and splitting point are chosen by the
    "sliding midpoint" rule, which ensures that the cells do not all
    become long and thin.

    The tree can be queried for the r closest neighbors of any given point
    (optionally returning only those within some maximum distance of the
    point). It can also be queried, with a substantial gain in efficiency,
    for the r approximate closest neighbors.

    For large dimensions (20 is already large) do not expect this to run
    significantly faster than brute force. High-dimensional nearest-neighbor
    queries are a substantial open problem in computer science.

    The tree also supports all-neighbors queries, both with arrays of points
    and with other kd-trees. These do use a reasonably efficient algorithm,
    but the kd-tree is not necessarily the best data structure for this
    sort of calculation.
    """

    def __init__(self, data, leafsize=10):
        """Construct a kd-tree.

        Parameters:
        ===========

        data : array-like, shape (n,k)
            The data points to be indexed. This array is not copied, and
            so modifying this data will result in bogus results.
        leafsize : positive integer
            The number of points at which the algorithm switches over to
            brute-force.
        """
        self.data = np.asarray(data)
        self.n, self.m = np.shape(self.data)
        self.leafsize = int(leafsize)
        if self.leafsize<1:
            raise ValueError("leafsize must be at least 1")
        # bounding box of the full data set; shrunk as the tree is built
        self.maxes = np.amax(self.data,axis=0)
        self.mins = np.amin(self.data,axis=0)
        self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)

    class node(object):
        # Abstract base class for tree nodes.
        pass

    class leafnode(node):
        def __init__(self, idx):
            # idx: array of indices (into the owning tree's data) in this leaf
            self.idx = idx
            self.children = len(idx)

    class innernode(node):
        def __init__(self, split_dim, split, less, greater):
            # points with coordinate <= split along split_dim live in `less`,
            # the rest in `greater`
            self.split_dim = split_dim
            self.split = split
            self.less = less
            self.greater = greater
            self.children = less.children+greater.children

    def __build(self, idx, maxes, mins):
        # Recursively build the subtree for data[idx], bounded by the box
        # (mins, maxes), using the sliding-midpoint rule.
        if len(idx)<=self.leafsize:
            return KDTree.leafnode(idx)
        else:
            data = self.data[idx]
            #maxes = np.amax(data,axis=0)
            #mins = np.amin(data,axis=0)
            # split along the dimension with the largest spread
            d = np.argmax(maxes-mins)
            maxval = maxes[d]
            minval = mins[d]
            if maxval==minval:
                # all points are identical; warn user?
                return KDTree.leafnode(idx)
            data = data[:,d]
            # sliding midpoint rule; see Maneewongvatana and Mount 1999
            # for arguments that this is a good idea.
            split = (maxval+minval)/2
            less_idx = np.nonzero(data<=split)[0]
            greater_idx = np.nonzero(data>split)[0]
            if len(less_idx)==0:
                # midpoint left one side empty: slide the split onto the data
                split = np.amin(data)
                less_idx = np.nonzero(data<=split)[0]
                greater_idx = np.nonzero(data>split)[0]
            if len(greater_idx)==0:
                split = np.amax(data)
                less_idx = np.nonzero(data<split)[0]
                greater_idx = np.nonzero(data>=split)[0]
            if len(less_idx)==0:
                # _still_ zero? all must have the same value
                assert np.all(data==data[0]), "Troublesome data array: %s" % data
                split = data[0]
                less_idx = np.arange(len(data)-1)
                greater_idx = np.array([len(data)-1])
            lessmaxes = np.copy(maxes)
            lessmaxes[d] = split
            greatermins = np.copy(mins)
            greatermins[d] = split
            return KDTree.innernode(d, split,
                    self.__build(idx[less_idx],lessmaxes,mins),
                    self.__build(idx[greater_idx],maxes,greatermins))

    def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
        # Single-point k-nearest-neighbor search. Returns a sorted list of
        # (distance, index) pairs. Internally works with pth powers of
        # distances (except for p=inf) to avoid taking roots in the loop.
        side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
        if p!=np.inf:
            side_distances**=p
            min_distance = np.sum(side_distances)
        else:
            min_distance = np.amax(side_distances)
        # priority queue for chasing nodes
        # entries are:
        #  minimum distance between the cell and the target
        #  distances between the nearest side of the cell and the target
        #  the head node of the cell
        q = [(min_distance,
              tuple(side_distances),
              self.tree)]
        # priority queue for the nearest neighbors
        # furthest known neighbor first
        # entries are (-distance**p, i)
        neighbors = []
        if eps==0:
            epsfac=1
        elif p==np.inf:
            epsfac = 1/(1+eps)
        else:
            epsfac = 1/(1+eps)**p
        # internally we compare pth powers, so raise the bound too
        if p!=np.inf and distance_upper_bound!=np.inf:
            distance_upper_bound = distance_upper_bound**p
        while q:
            min_distance, side_distances, node = heappop(q)
            if isinstance(node, KDTree.leafnode):
                # brute-force
                data = self.data[node.idx]
                ds = minkowski_distance_p(data,x[np.newaxis,:],p)
                for i in range(len(ds)):
                    if ds[i]<distance_upper_bound:
                        if len(neighbors)==k:
                            heappop(neighbors)
                        heappush(neighbors, (-ds[i], node.idx[i]))
                        if len(neighbors)==k:
                            distance_upper_bound = -neighbors[0][0]
            else:
                # we don't push cells that are too far onto the queue at all,
                # but since the distance_upper_bound decreases, we might get
                # here even if the cell's too far
                if min_distance>distance_upper_bound*epsfac:
                    # since this is the nearest cell, we're done, bail out
                    break
                # compute minimum distances to the children and push them on
                if x[node.split_dim]<node.split:
                    near, far = node.less, node.greater
                else:
                    near, far = node.greater, node.less
                # near child is at the same distance as the current node
                heappush(q,(min_distance, side_distances, near))
                # far child is further by an amount depending only
                # on the split value
                sd = list(side_distances)
                if p == np.inf:
                    min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
                elif p == 1:
                    sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
                    min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
                else:
                    sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
                    min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
                # far child might be too far, if so, don't bother pushing it
                if min_distance<=distance_upper_bound*epsfac:
                    heappush(q,(min_distance, tuple(sd), far))
        # convert back from (negative) pth powers to true distances
        if p==np.inf:
            return sorted([(-d,i) for (d,i) in neighbors])
        else:
            return sorted([((-d)**(1./p),i) for (d,i) in neighbors])

    def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
        """query the kd-tree for nearest neighbors

        Parameters:
        ===========

        x : array-like, last dimension self.m
            An array of points to query.
        k : integer
            The number of nearest neighbors to return.
        eps : nonnegative float
            Return approximate nearest neighbors; the kth returned value
            is guaranteed to be no further than (1+eps) times the
            distance to the real kth nearest neighbor.
        p : float, 1<=p<=infinity
            Which Minkowski p-norm to use.
            1 is the sum-of-absolute-values "Manhattan" distance
            2 is the usual Euclidean distance
            infinity is the maximum-coordinate-difference distance
        distance_upper_bound : nonnegative float
            Return only neighbors within this distance. This is used to prune
            tree searches, so if you are doing a series of nearest-neighbor
            queries, it may help to supply the distance to the nearest neighbor
            of the most recent point.

        Returns:
        ========

        d : array of floats
            The distances to the nearest neighbors.
            If x has shape tuple+(self.m,), then d has shape tuple if
            k is one, or tuple+(k,) if k is larger than one. Missing
            neighbors are indicated with infinite distances. If k is None,
            then d is an object array of shape tuple, containing lists
            of distances. In either case the hits are sorted by distance
            (nearest first).
        i : array of integers
            The locations of the neighbors in self.data. i is the same
            shape as d.
        """
        x = np.asarray(x)
        if np.shape(x)[-1] != self.m:
            raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
        if p<1:
            raise ValueError("Only p-norms with 1<=p<=infinity permitted")
        retshape = np.shape(x)[:-1]
        if retshape!=():
            # vectorized case: run one scalar query per point in x
            # NOTE(review): np.float/np.int/np.object below were removed in
            # NumPy 1.24; this needs float/int/object to run on modern NumPy.
            # NOTE(review): eps is not forwarded to __query here (or in the
            # scalar branch below), so approximate search is silently
            # ignored by this method — confirm whether that is intended.
            if k>1:
                dd = np.empty(retshape+(k,),dtype=np.float)
                dd.fill(np.inf)
                ii = np.empty(retshape+(k,),dtype=np.int)
                ii.fill(self.n)
            elif k==1:
                dd = np.empty(retshape,dtype=np.float)
                dd.fill(np.inf)
                ii = np.empty(retshape,dtype=np.int)
                ii.fill(self.n)
            elif k is None:
                dd = np.empty(retshape,dtype=np.object)
                ii = np.empty(retshape,dtype=np.object)
            else:
                raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
            for c in np.ndindex(retshape):
                hits = self.__query(x[c], k=k, p=p, distance_upper_bound=distance_upper_bound)
                if k>1:
                    for j in range(len(hits)):
                        dd[c+(j,)], ii[c+(j,)] = hits[j]
                elif k==1:
                    if len(hits)>0:
                        dd[c], ii[c] = hits[0]
                    else:
                        dd[c] = np.inf
                        ii[c] = self.n
                elif k is None:
                    dd[c] = [d for (d,i) in hits]
                    ii[c] = [i for (d,i) in hits]
            return dd, ii
        else:
            # scalar case: x is a single point
            hits = self.__query(x, k=k, p=p, distance_upper_bound=distance_upper_bound)
            if k==1:
                if len(hits)>0:
                    return hits[0]
                else:
                    return np.inf, self.n
            elif k>1:
                dd = np.empty(k,dtype=np.float)
                dd.fill(np.inf)
                ii = np.empty(k,dtype=np.int)
                ii.fill(self.n)
                for j in range(len(hits)):
                    dd[j], ii[j] = hits[j]
                return dd, ii
            elif k is None:
                return [d for (d,i) in hits], [i for (d,i) in hits]
            else:
                raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")

    def __query_ball_point(self, x, r, p=2., eps=0):
        # Fixed-radius search around a single point x.
        R = Rectangle(self.maxes, self.mins)
        def traverse_checking(node, rect):
            # prune cells entirely outside the (shrunken) radius; take cells
            # entirely inside the (grown) radius wholesale
            if rect.min_distance_point(x,p)>=r/(1.+eps):
                return []
            elif rect.max_distance_point(x,p)<r*(1.+eps):
                return traverse_no_checking(node)
            elif isinstance(node, KDTree.leafnode):
                d = self.data[node.idx]
                return node.idx[minkowski_distance(d,x,p)<=r].tolist()
            else:
                less, greater = rect.split(node.split_dim, node.split)
                return traverse_checking(node.less, less)+traverse_checking(node.greater, greater)
        def traverse_no_checking(node):
            # the whole subtree is known to be inside the radius
            if isinstance(node, KDTree.leafnode):
                return node.idx.tolist()
            else:
                return traverse_no_checking(node.less)+traverse_no_checking(node.greater)
        return traverse_checking(self.tree, R)

    def query_ball_point(self, x, r, p=2., eps=0):
        """Find all points within r of x

        Parameters
        ==========

        x : array_like, shape tuple + (self.m,)
            The point or points to search for neighbors of
        r : positive float
            The radius of points to return
        p : float 1<=p<=infinity
            Which Minkowski p-norm to use
        eps : nonnegative float
            Approximate search. Branches of the tree are not explored
            if their nearest points are further than r/(1+eps), and branches
            are added in bulk if their furthest points are nearer than r*(1+eps).

        Returns
        =======

        results : list or array of lists
            If x is a single point, returns a list of the indices of the neighbors
            of x. If x is an array of points, returns an object array of shape tuple
            containing lists of neighbors.

        Note: if you have many points whose neighbors you want to find, you may save
        substantial amounts of time by putting them in a KDTree and using query_ball_tree.
        """
        x = np.asarray(x)
        if x.shape[-1]!=self.m:
            raise ValueError("Searching for a %d-dimensional point in a %d-dimensional KDTree" % (x.shape[-1],self.m))
        if len(x.shape)==1:
            return self.__query_ball_point(x,r,p,eps)
        else:
            # vectorized case: one result list per query point
            # NOTE(review): np.object was removed in NumPy 1.24; use object.
            retshape = x.shape[:-1]
            result = np.empty(retshape,dtype=np.object)
            for c in np.ndindex(retshape):
                result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
            return result

    def query_ball_tree(self, other, r, p=2., eps=0):
        """Find all pairs of points whose distance is at most r

        Parameters
        ==========

        other : KDTree
            The tree containing points to search against
        r : positive float
            The maximum distance
        p : float 1<=p<=infinity
            Which Minkowski norm to use
        eps : nonnegative float
            Approximate search. Branches of the tree are not explored
            if their nearest points are further than r/(1+eps), and branches
            are added in bulk if their furthest points are nearer than r*(1+eps).

        Returns
        =======

        results : list of lists
            For each element self.data[i] of this tree, results[i] is a list of the
            indices of its neighbors in other.data.
        """
        results = [[] for i in range(self.n)]
        def traverse_checking(node1, rect1, node2, rect2):
            # dual-tree traversal with pruning on rectangle distances
            if rect1.min_distance_rectangle(rect2, p)>r/(1.+eps):
                return
            elif rect1.max_distance_rectangle(rect2, p)<r*(1.+eps):
                traverse_no_checking(node1, node2)
            elif isinstance(node1, KDTree.leafnode):
                if isinstance(node2, KDTree.leafnode):
                    d = other.data[node2.idx]
                    for i in node1.idx:
                        results[i] += node2.idx[minkowski_distance(d,self.data[i],p)<=r].tolist()
                else:
                    less, greater = rect2.split(node2.split_dim, node2.split)
                    traverse_checking(node1,rect1,node2.less,less)
                    traverse_checking(node1,rect1,node2.greater,greater)
            elif isinstance(node2, KDTree.leafnode):
                less, greater = rect1.split(node1.split_dim, node1.split)
                traverse_checking(node1.less,less,node2,rect2)
                traverse_checking(node1.greater,greater,node2,rect2)
            else:
                less1, greater1 = rect1.split(node1.split_dim, node1.split)
                less2, greater2 = rect2.split(node2.split_dim, node2.split)
                traverse_checking(node1.less,less1,node2.less,less2)
                traverse_checking(node1.less,less1,node2.greater,greater2)
                traverse_checking(node1.greater,greater1,node2.less,less2)
                traverse_checking(node1.greater,greater1,node2.greater,greater2)
        def traverse_no_checking(node1, node2):
            # every pair between these two subtrees is within range
            if isinstance(node1, KDTree.leafnode):
                if isinstance(node2, KDTree.leafnode):
                    for i in node1.idx:
                        results[i] += node2.idx.tolist()
                else:
                    traverse_no_checking(node1, node2.less)
                    traverse_no_checking(node1, node2.greater)
            else:
                traverse_no_checking(node1.less, node2)
                traverse_no_checking(node1.greater, node2)
        traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
                          other.tree, Rectangle(other.maxes, other.mins))
        return results

    def count_neighbors(self, other, r, p=2.):
        """Count how many nearby pairs can be formed.

        Count the number of pairs (x1,x2) can be formed, with x1 drawn
        from self and x2 drawn from other, and where distance(x1,x2,p)<=r.
        This is the "two-point correlation" described in Gray and Moore 2000,
        "N-body problems in statistical learning", and the code here is based
        on their algorithm.

        Parameters
        ==========

        other : KDTree
        r : float or one-dimensional array of floats
            The radius to produce a count for. Multiple radii are searched with a single
            tree traversal.
        p : float, 1<=p<=infinity
            Which Minkowski p-norm to use

        Returns
        =======

        result : integer or one-dimensional array of integers
            The number of pairs. Note that this is internally stored in a numpy int,
            and so may overflow if very large (two billion).
        """
        def traverse(node1, rect1, node2, rect2, idx):
            # idx: indices into r still undecided for this pair of cells
            min_r = rect1.min_distance_rectangle(rect2,p)
            max_r = rect1.max_distance_rectangle(rect2,p)
            # radii beyond max_r count every cross pair in bulk
            c_greater = r[idx]>max_r
            result[idx[c_greater]] += node1.children*node2.children
            idx = idx[(min_r<=r[idx]) & (r[idx]<=max_r)]
            if len(idx)==0:
                return
            if isinstance(node1,KDTree.leafnode):
                if isinstance(node2,KDTree.leafnode):
                    # brute force between two leaves: sort the pairwise
                    # distances once and binary-search each radius
                    ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
                                            other.data[node2.idx][np.newaxis,:,:],
                                            p).ravel()
                    ds.sort()
                    result[idx] += np.searchsorted(ds,r[idx],side='right')
                else:
                    less, greater = rect2.split(node2.split_dim, node2.split)
                    traverse(node1, rect1, node2.less, less, idx)
                    traverse(node1, rect1, node2.greater, greater, idx)
            else:
                if isinstance(node2,KDTree.leafnode):
                    less, greater = rect1.split(node1.split_dim, node1.split)
                    traverse(node1.less, less, node2, rect2, idx)
                    traverse(node1.greater, greater, node2, rect2, idx)
                else:
                    less1, greater1 = rect1.split(node1.split_dim, node1.split)
                    less2, greater2 = rect2.split(node2.split_dim, node2.split)
                    traverse(node1.less,less1,node2.less,less2,idx)
                    traverse(node1.less,less1,node2.greater,greater2,idx)
                    traverse(node1.greater,greater1,node2.less,less2,idx)
                    traverse(node1.greater,greater1,node2.greater,greater2,idx)
        R1 = Rectangle(self.maxes, self.mins)
        R2 = Rectangle(other.maxes, other.mins)
        if np.shape(r) == ():
            # scalar radius: return a plain integer
            r = np.array([r])
            result = np.zeros(1,dtype=int)
            traverse(self.tree, R1, other.tree, R2, np.arange(1))
            return result[0]
        elif len(np.shape(r))==1:
            r = np.asarray(r)
            n, = r.shape
            result = np.zeros(n,dtype=int)
            traverse(self.tree, R1, other.tree, R2, np.arange(n))
            return result
        else:
            raise ValueError("r must be either a single value or a one-dimensional array of values")
###############################################################
# Commented this routine out because of scipy.sparse dependence
# -rdh
###############################################################
# def sparse_distance_matrix(self, other, max_distance, p=2.):
# """Compute a sparse distance matrix
#
# Computes a distance matrix between two KDTrees, leaving as zero
# any distance greater than max_distance.
#
# Parameters
# ==========
#
# other : KDTree
#
# max_distance : positive float
#
# Returns
# =======
#
# result : dok_matrix
# Sparse matrix representing the results in "dictionary of keys" format.
# """
# result = scipy.sparse.dok_matrix((self.n,other.n))
#
# def traverse(node1, rect1, node2, rect2):
# if rect1.min_distance_rectangle(rect2, p)>max_distance:
# return
# elif isinstance(node1, KDTree.leafnode):
# if isinstance(node2, KDTree.leafnode):
# for i in node1.idx:
# for j in node2.idx:
# d = minkowski_distance(self.data[i],other.data[j],p)
# if d<=max_distance:
# result[i,j] = d
# else:
# less, greater = rect2.split(node2.split_dim, node2.split)
# traverse(node1,rect1,node2.less,less)
# traverse(node1,rect1,node2.greater,greater)
# elif isinstance(node2, KDTree.leafnode):
# less, greater = rect1.split(node1.split_dim, node1.split)
# traverse(node1.less,less,node2,rect2)
# traverse(node1.greater,greater,node2,rect2)
# else:
# less1, greater1 = rect1.split(node1.split_dim, node1.split)
# less2, greater2 = rect2.split(node2.split_dim, node2.split)
# traverse(node1.less,less1,node2.less,less2)
# traverse(node1.less,less1,node2.greater,greater2)
# traverse(node1.greater,greater1,node2.less,less2)
# traverse(node1.greater,greater1,node2.greater,greater2)
# traverse(self.tree, Rectangle(self.maxes, self.mins),
# other.tree, Rectangle(other.maxes, other.mins))
#
# return result
def distance_matrix(x, y, p=2, threshold=1000000):
    """Compute the distance matrix.

    Computes the matrix of all pairwise Minkowski p-distances.

    Parameters
    ==========

    x : array-like, m by k
    y : array-like, n by k
    p : float 1<=p<=infinity
        Which Minkowski p-norm to use.
    threshold : positive integer
        If m*n*k>threshold use a python loop instead of creating
        a very large temporary.

    Returns
    =======

    result : array-like, m by n
    """
    x = np.asarray(x)
    m, k = x.shape
    y = np.asarray(y)
    n, kk = y.shape
    if k != kk:
        raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
    if m*n*k <= threshold:
        # small enough: one fully-broadcast vectorized computation
        return minkowski_distance(x[:, np.newaxis, :], y[np.newaxis, :, :], p)
    else:
        # np.float was removed in NumPy 1.24; the builtin float is the
        # same dtype (float64).
        result = np.empty((m, n), dtype=float)  # FIXME: figure out the best dtype
        # loop over the shorter axis so the vectorized inner call is largest
        if m < n:
            for i in range(m):
                result[i, :] = minkowski_distance(x[i], y, p)
        else:
            for j in range(n):
                result[:, j] = minkowski_distance(x, y[j], p)
        return result
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: maturity ('preview') and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# ansible-doc documentation block. Fixes applied: close the unbalanced
# C(when_all_available( marker and wrap bare option values — (disabled),
# (enabled), (selective), (always) — in C() so the doc renderer formats
# them consistently.
DOCUMENTATION = '''
---
module: bigip_virtual_address
short_description: Manage LTM virtual addresses on a BIG-IP.
description:
  - Manage LTM virtual addresses on a BIG-IP.
version_added: "2.4"
options:
  address:
    description:
      - Virtual address. This value cannot be modified after it is set.
    required: True
    aliases:
      - name
  netmask:
    description:
      - Netmask of the provided virtual address. This value cannot be
        modified after it is set.
    default: 255.255.255.255
  connection_limit:
    description:
      - Specifies the number of concurrent connections that the system
        allows on this virtual address.
  arp_state:
    description:
      - Specifies whether the system accepts ARP requests. When C(disabled),
        specifies that the system does not accept ARP requests. Note that
        both ARP and ICMP Echo must be disabled in order for forwarding
        virtual servers using that virtual address to forward ICMP packets.
        If C(enabled), then the packets are dropped.
    choices:
      - enabled
      - disabled
  auto_delete:
    description:
      - Specifies whether the system automatically deletes the virtual
        address with the deletion of the last associated virtual server.
        When C(disabled), specifies that the system leaves the virtual
        address even when all associated virtual servers have been deleted.
        When creating the virtual address, the default value is C(enabled).
    choices:
      - enabled
      - disabled
  icmp_echo:
    description:
      - Specifies how the systems sends responses to (ICMP) echo requests
        on a per-virtual address basis for enabling route advertisement.
        When C(enabled), the BIG-IP system intercepts ICMP echo request
        packets and responds to them directly. When C(disabled), the BIG-IP
        system passes ICMP echo requests through to the backend servers.
        When C(selective), causes the BIG-IP system to internally enable or
        disable responses based on virtual server state; C(when_any_available),
        C(when_all_available), or C(always), regardless of the state of any
        virtual servers.
    choices:
      - enabled
      - disabled
      - selective
  state:
    description:
      - The virtual address state. If C(absent), an attempt to delete the
        virtual address will be made. This will only succeed if this
        virtual address is not in use by a virtual server. C(present) creates
        the virtual address and enables it. If C(enabled), enable the virtual
        address if it exists. If C(disabled), create the virtual address if
        needed, and set state to C(disabled).
    default: present
    choices:
      - present
      - absent
      - enabled
      - disabled
  advertise_route:
    description:
      - Specifies what routes of the virtual address the system advertises.
        When C(when_any_available), advertises the route when any virtual
        server is available. When C(when_all_available), advertises the
        route when all virtual servers are available. When C(always), always
        advertises the route regardless of the virtual servers available.
    choices:
      - always
      - when_all_available
      - when_any_available
  use_route_advertisement:
    description:
      - Specifies whether the system uses route advertisement for this
        virtual address. When disabled, the system does not advertise
        routes for this virtual address.
    choices:
      - yes
      - no
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as pip
    install f5-sdk.
  - Requires the netaddr Python package on the host. This is as easy as pip
    install netaddr.
extends_documentation_fragment: f5
author:
  - Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add virtual address
bigip_virtual_address:
server: "lb.mydomain.net"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
address: "10.10.10.10"
delegate_to: localhost
- name: Enable route advertisement on the virtual address
bigip_virtual_address:
server: "lb.mydomain.net"
user: "admin"
password: "secret"
state: "present"
address: "10.10.10.10"
use_route_advertisement: yes
delegate_to: localhost
'''
# Documented return values for ansible-doc. Fixes applied: netmask and
# address are address strings (the module's netmask default is
# 255.255.255.255), not ints — their type/sample entries were wrong.
RETURN = '''
use_route_advertisement:
    description: The new setting for whether to use route advertising or not.
    returned: changed
    type: bool
    sample: true
auto_delete:
    description: New setting for auto deleting virtual address.
    returned: changed
    type: string
    sample: enabled
icmp_echo:
    description: New ICMP echo setting applied to virtual address.
    returned: changed
    type: string
    sample: disabled
connection_limit:
    description: The new connection limit of the virtual address.
    returned: changed
    type: int
    sample: 1000
netmask:
    description: The netmask of the virtual address.
    returned: created
    type: string
    sample: 255.255.255.255
arp_state:
    description: The new way the virtual address handles ARP requests.
    returned: changed
    type: string
    sample: disabled
address:
    description: The address of the virtual address.
    returned: created
    type: string
    sample: 10.10.10.10
state:
    description: The new state of the virtual address.
    returned: changed
    type: string
    sample: disabled
'''
# netaddr is an optional third-party dependency; record its availability
# instead of failing at import time so the module can report the missing
# package gracefully at runtime.
try:
    import netaddr
    HAS_NETADDR = True
except ImportError:
    HAS_NETADDR = False
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
class Parameters(AnsibleF5Parameters):
    """Maps between Ansible module options and F5 REST API attributes."""

    # REST attribute name -> module option name.
    api_map = {
        'routeAdvertisement': 'use_route_advertisement',
        'autoDelete': 'auto_delete',
        'icmpEcho': 'icmp_echo',
        'connectionLimit': 'connection_limit',
        'serverScope': 'advertise_route',
        'mask': 'netmask',
        'arp': 'arp_state'
    }

    # Options compared against the device state to detect changes.
    updatables = [
        'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
        'arp_state', 'enabled', 'advertise_route'
    ]

    # Options reported back to the user when they change.
    returnables = [
        'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
        'netmask', 'arp_state', 'address', 'state'
    ]

    # REST attributes sent to the device on create/update.
    api_attributes = [
        'routeAdvertisement', 'autoDelete', 'icmpEcho', 'connectionLimit',
        'advertiseRoute', 'arp', 'mask', 'enabled', 'serverScope'
    ]

    @property
    def advertise_route(self):
        """Normalize the advertisement scope to the API's any/all/none terms."""
        value = self._values['advertise_route']
        if value is None:
            return None
        if value in ('any', 'when_any_available'):
            return 'any'
        if value in ('all', 'when_all_available'):
            return 'all'
        if value in ('none', 'always'):
            return 'none'
        return None

    @property
    def connection_limit(self):
        """Connection limit coerced to an integer, or None when unset."""
        raw = self._values['connection_limit']
        return None if raw is None else int(raw)

    @property
    def use_route_advertisement(self):
        """Map truthy module input or the API's 'enabled' to enabled/disabled."""
        value = self._values['use_route_advertisement']
        if value is None:
            return None
        if value in BOOLEANS_TRUE or value == 'enabled':
            return 'enabled'
        return 'disabled'

    @property
    def enabled(self):
        """Derive yes/no from the requested state and the enabled flag."""
        state = self._values['state']
        flag = self._values['enabled']
        if state in ('enabled', 'present'):
            return 'yes'
        if flag in BOOLEANS_TRUE:
            return 'yes'
        if state == 'disabled':
            return 'no'
        if flag in BOOLEANS_FALSE:
            return 'no'
        return None

    @property
    def address(self):
        """Validate and canonicalize the virtual address via netaddr."""
        raw = self._values['address']
        if raw is None:
            return None
        try:
            return str(netaddr.IPAddress(raw))
        except netaddr.core.AddrFormatError:
            raise F5ModuleError(
                "The provided 'address' is not a valid IP address"
            )

    @property
    def netmask(self):
        """Validate and canonicalize the netmask via netaddr."""
        raw = self._values['netmask']
        if raw is None:
            return None
        try:
            return str(netaddr.IPAddress(raw))
        except netaddr.core.AddrFormatError:
            raise F5ModuleError(
                "The provided 'netmask' is not a valid IP address"
            )

    @property
    def auto_delete(self):
        """Coerce the auto_delete option to a plain boolean."""
        value = self._values['auto_delete']
        if value is None:
            return None
        return value in BOOLEANS_TRUE or value == 'enabled'

    @property
    def state(self):
        """Resolve the effective state from the 'state' and 'enabled' inputs."""
        if self.enabled == 'yes' and self._values['state'] != 'present':
            return 'enabled'
        if self.enabled == 'no':
            return 'disabled'
        return self._values['state']

    def to_return(self):
        """Build the filtered dict of reportable values."""
        result = dict((key, getattr(self, key)) for key in self.returnables)
        return self._filter_params(result)

    def api_params(self):
        """Build the filtered dict of REST attributes to send to the device."""
        result = {}
        for attribute in self.api_attributes:
            source = self.api_map.get(attribute, attribute)
            result[attribute] = getattr(self, source)
        return self._filter_params(result)
class ModuleManager(object):
    """Drives the create/update/delete workflow against the BIG-IP device."""

    def __init__(self, client):
        self.client = client
        self.have = None  # device-side state; populated during update()
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        """Record every user-supplied returnable as a pending change."""
        changed = dict(
            (key, getattr(self.want, key))
            for key in Parameters.returnables
            if getattr(self.want, key) is not None
        )
        if changed:
            self.changes = Parameters(changed)

    def _update_changed_options(self):
        """Diff want vs. have; stash differences and report whether any exist."""
        changed = {}
        for key in Parameters.updatables:
            desired = getattr(self.want, key)
            if desired is None:
                continue
            if desired != getattr(self.have, key):
                changed[key] = desired
        if not changed:
            return False
        self.changes = Parameters(changed)
        return True

    def exec_module(self):
        """Entry point: apply the requested state and report the outcome."""
        changed = False
        state = self.want.state
        try:
            if state in ('present', 'enabled', 'disabled'):
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        result = dict(changed=changed)
        result.update(self.changes.to_return())
        return result

    def should_update(self):
        """True when a device update is required."""
        return bool(self._update_changed_options())

    def present(self):
        """Ensure the virtual address exists, creating or updating as needed."""
        return self.update() if self.exists() else self.create()

    def absent(self):
        """Ensure the virtual address is gone."""
        if not self.exists():
            return False
        return self.remove()

    def read_current_from_device(self):
        """Load the current resource attributes from the device."""
        resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
            name=self.want.address,
            partition=self.want.partition
        )
        return Parameters(resource.attrs)

    def exists(self):
        """Check whether the virtual address is already defined."""
        return self.client.api.tm.ltm.virtual_address_s.virtual_address.exists(
            name=self.want.address,
            partition=self.want.partition
        )

    def update(self):
        """Update the device resource; returns True when something changed."""
        self.have = self.read_current_from_device()
        # Neither the netmask nor the address may change in place.
        if self.want.netmask is not None and self.have.netmask != self.want.netmask:
            raise F5ModuleError(
                "The netmask cannot be changed. Delete and recreate"
                "the virtual address if you need to do this."
            )
        if self.want.address is not None and self.have.address != self.want.address:
            raise F5ModuleError(
                "The address cannot be changed. Delete and recreate"
                "the virtual address if you need to do this."
            )
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        """Push the changed attributes to the existing device resource."""
        resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
            name=self.want.address,
            partition=self.want.partition
        )
        resource.modify(**self.want.api_params())

    def create(self):
        """Create the virtual address; returns True on success."""
        self._set_changed_options()
        if self.client.check_mode:
            return True
        self.create_on_device()
        if not self.exists():
            raise F5ModuleError("Failed to create the virtual address")
        return True

    def create_on_device(self):
        """Issue the create call to the device."""
        self.client.api.tm.ltm.virtual_address_s.virtual_address.create(
            name=self.want.address,
            partition=self.want.partition,
            address=self.want.address,
            **self.want.api_params()
        )

    def remove(self):
        """Delete the virtual address; returns True on success."""
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the virtual address")
        return True

    def remove_from_device(self):
        """Issue the delete call to the device."""
        resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
            name=self.want.address,
            partition=self.want.partition
        )
        resource.delete()
class ArgumentSpec(object):
    """Declares the Ansible argument spec for the bigip_virtual_address module."""

    def __init__(self):
        self.supports_check_mode = True
        on_off = ['enabled', 'disabled']
        self.argument_spec = dict(
            state=dict(
                default='present',
                choices=['present', 'absent', 'disabled', 'enabled']
            ),
            address=dict(
                type='str',
                required=True,
                aliases=['name']
            ),
            netmask=dict(
                type='str',
                default='255.255.255.255',
            ),
            connection_limit=dict(type='int'),
            arp_state=dict(choices=list(on_off)),
            auto_delete=dict(choices=list(on_off)),
            icmp_echo=dict(choices=on_off + ['selective']),
            advertise_route=dict(
                choices=['always', 'when_all_available', 'when_any_available'],
            ),
            use_route_advertisement=dict(type='bool')
        )
        self.f5_product_name = 'bigip'
def main():
    """Module entry point: validate dependencies, run the manager, exit.

    Raises:
        F5ModuleError: When a required third-party library is missing.
    """
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    # The Parameters class uses netaddr unconditionally for address/netmask
    # validation and the module docs list it as a requirement, so fail fast
    # with a clean error instead of a NameError later.
    if not HAS_NETADDR:
        raise F5ModuleError("The python netaddr module is required")

    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        client.module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic to update a TensorFlow model graph with quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Quantizable operation types that are supported by the quantization rewrite.
_QUANTIZABLE_TYPES = {'Conv2D', 'MatMul', 'DepthwiseConv2dNative'}

# Activations that are supported by the quantization rewrite.
_ACTIVATION_TYPES = {'Relu', 'Relu6', 'Identity'}

# ReLU-family activations; used to skip quantizing adds that feed them.
_RELU_TYPES = {'Relu', 'Relu6'}

# Op type inserted by this rewrite to simulate quantization.
_QUANTIZATION_OP = {'FakeQuantWithMinMaxVars'}
# Op types whose outputs may need an activation quant (see
# _CheckIfQuantizableOp).
_VALID_SRC_OP = {'Add', 'Mul'}
# Op types treated as intermediate arithmetic when walking producer chains.
_INTERMEDIATE_OP = {'Add', 'Mul'}
# Op types that pass their input through unchanged; skipped while walking
# producer chains.
_PASS_THROUGH_OP = {'Reshape', 'Identity', 'BatchToSpaceND', 'SpaceToBatchND'}
# Activations after which an extra quant op would be redundant.
_VALID_ACTIVATION_OP = {'Relu', 'Relu6'}
def Quantize(graph,
             is_training,
             weight_bits=8,
             activation_bits=8,
             symmetric=False,
             ema_decay=0.999,
             quant_delay=None,
             vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
             scope=None):
  """Updates graph with quantization operations.

  Currently we quantize the following tensors:
  * Conv/MatMul: Quantize the weights if it matches.
  * Activation: Quantize the output if it matches.
  * Bypass/Post-activation Bypass: Quantize both input and output
    if it matches.

  Args:
    graph: Graph to modify.
    is_training: Whether quantizing training graph or eval graph.
    weight_bits: Number of bits to use for quantizing weights.
    activation_bits: Number of bits to use for quantizing activations.
    symmetric: (Optional) If true, use symmetric quantization limits instead of
      training the minimum and maximum of each quantization range separately.
    ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
      quantization intervals for quantizing activations (see here about EMA:
      https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
    quant_delay: (Optional, default None) Int, count of global steps for which
      to delay quantization. This helps weights stabilize at the start of
      training.
    vars_collection: (Optional) Collection where to store the variables for
      quantization interval ends.
    scope: The scope to be transformed. If it's not None, only the ops which
      are in this scope will be transformed.

  Raises:
    ValueError: When quantization fails.
  """
  # Normalize the scope to end with '/' so prefix matching against op names
  # works in _InsertQuantOp.
  if scope and not scope.endswith('/'):
    scope += '/'

  input_to_ops_map = input_to_ops.InputToOps(graph)
  # Track ops that already got an activation quant so the fallback pass in
  # _QuantizeActivationLayers does not quantize them a second time.
  quantized_ops = set()
  for layer_match in _FindLayersToQuantize(graph):
    # Quantize the weights.
    context = _GetContextFromOp(layer_match.layer_op)

    # If `scope` is given, only quantize it if the consumer of weights
    # (the layer op) is in the right scope.
    if layer_match.weight_tensor is not None:
      _InsertQuantOp(
          context,
          'weights_quant',
          layer_match.weight_tensor.op,
          input_to_ops_map.ConsumerOperations(layer_match.weight_tensor.op),
          is_training,
          moving_avg=False,
          ema_decay=ema_decay,
          quant_delay=quant_delay,
          narrow_range=True,
          vars_collection=vars_collection,
          bits=weight_bits,
          symmetric=symmetric,
          consumer_scope=scope)

    # Quantize the activations.
    if layer_match.activation_op is not None:
      consumer_ops = input_to_ops_map.ConsumerOperations(
          layer_match.activation_op)
      add_context = context
      if layer_match.bypass_op:
        # Strip the last path component so the quant op lands in the scope
        # containing the bypass add.
        pattern_match_result = re.search(r'^(.*)/([^/]+)', context)
        if pattern_match_result is not None:
          add_context = pattern_match_result.group(1)
        else:
          add_context = ''
      # If `scope` is given, only quantize it if the producer of weights
      # (usually it's the layer op) is in the right scope.
      _InsertQuantOp(
          add_context,
          'act_quant',
          layer_match.activation_op,
          consumer_ops,
          is_training,
          moving_avg=True,
          ema_decay=ema_decay,
          quant_delay=quant_delay,
          vars_collection=vars_collection,
          bits=activation_bits,
          symmetric=symmetric,
          init_min=0.0,
          producer_scope=scope)
      quantized_ops.add(layer_match.activation_op)

    # Quantize the inputs and output to the bypass (if it exists). The input to
    # the bypass is the bias add, and the output is the activation.
    if layer_match.bypass_op is not None:
      # If `scope` is given, only quantize it if the both the producer and the
      # consumer are in the right scope.
      _InsertQuantOp(
          context,
          'conv_quant',
          layer_match.bias_add_op,
          input_to_ops_map.ConsumerOperations(layer_match.bias_add_op),
          is_training,
          moving_avg=True,
          ema_decay=ema_decay,
          quant_delay=quant_delay,
          vars_collection=vars_collection,
          bits=activation_bits,
          symmetric=symmetric,
          producer_scope=scope,
          consumer_scope=scope)
      quantized_ops.add(layer_match.bias_add_op)
      # Make sure the op following this isn't an activation. In which case, we
      # shouldn't quantize it, since the activation will be Fused into the
      # Add at inference time.
      consumers = input_to_ops_map.ConsumerOperations(layer_match.bypass_op)
      if any(consumer.type in _ACTIVATION_TYPES for consumer in consumers):
        logging.info('Skipping %s, because its followed by an activation.',
                     layer_match.bypass_op.name)
      else:
        # NOTE(review): `add_context` is only assigned in the activation
        # branch above; this assumes any match with a bypass_op also has an
        # activation_op — confirm against _FindLayersToQuantize's patterns.
        _InsertQuantOp(
            add_context,
            'add_quant',
            layer_match.bypass_op,
            input_to_ops_map.ConsumerOperations(layer_match.bypass_op),
            is_training,
            moving_avg=True,
            ema_decay=ema_decay,
            quant_delay=quant_delay,
            vars_collection=vars_collection,
            bits=activation_bits,
            symmetric=symmetric,
            producer_scope=scope,
            consumer_scope=scope)
        quantized_ops.add(layer_match.bypass_op)

    # Quantize bypass ops that occur after the activation.
    if layer_match.post_activation_bypass_op is not None:
      pattern_match_result = re.search(
          r'^(.*)/([^/]+)', layer_match.post_activation_bypass_op.name)
      if pattern_match_result is not None:
        post_activation_bypass_context = pattern_match_result.group(1)
      else:
        post_activation_bypass_context = ''
      # If `scope` is given, only quantize it if the producer is in the right
      # scope.
      # Make sure the op following this isn't an activation. In which case, we
      # shouldn't quantize it, since the activation will be Fused into the
      # Add at inference time.
      consumers = input_to_ops_map.ConsumerOperations(
          layer_match.post_activation_bypass_op)
      if any(consumer.type in _RELU_TYPES for consumer in consumers):
        logging.info('Skipping %s, because its followed by an activation.',
                     layer_match.post_activation_bypass_op.name)
      else:
        _InsertQuantOp(
            post_activation_bypass_context,
            'post_activation_bypass_quant',
            layer_match.post_activation_bypass_op,
            consumers,
            is_training,
            moving_avg=True,
            ema_decay=ema_decay,
            quant_delay=quant_delay,
            vars_collection=vars_collection,
            bits=activation_bits,
            symmetric=symmetric,
            producer_scope=scope)
        quantized_ops.add(layer_match.post_activation_bypass_op)

  # Fallback pass: quantize remaining Add/Mul activation outputs that the
  # layer matches above did not cover.
  _QuantizeActivationLayers(
      quantized_ops,
      graph,
      is_training,
      activation_bits,
      ema_decay,
      quant_delay,
      vars_collection,
      scope=scope)
def _QuantizeActivationLayers(quantized_ops,
                              graph,
                              is_training,
                              activation_bits=8,
                              ema_decay=0.999,
                              quant_delay=None,
                              vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
                              scope=None):
  """Quantize intermediate activation tensors after addition and multiplication.

  Args:
    quantized_ops: Set of previously quantized activation ops.
    graph: Graph to modify.
    is_training: Whether quantizing training graph or eval graph.
    activation_bits: Number of bits to use for quantizing activations.
    ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
      quantization intervals for quantizing activations (see here about EMA:
      https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
    quant_delay: (Optional, default None) Int, count of global steps for which
      to delay quantization. This helps weights stabilize at the start of
      training.
    vars_collection: (Optional) Collection where to store the variables for
      quantization interval ends.
    scope: The scope to be transformed. If it's not None, only the ops which are
      in this scope will be transformed.

  Raises:
    ValueError: When quantization fails.
  """
  input_to_ops_map = input_to_ops.InputToOps(graph)
  # Iterate the graph's operations directly; the previous generator wrapper
  # around graph.get_operations() added nothing.
  for op in graph.get_operations():
    if _CheckIfQuantizableOp(op, quantized_ops):
      logging.info('Inserting fake quant op activation_%s_quant after %s',
                   op.type, op.name)
      consumers = input_to_ops_map.ConsumerOperations(op)
      _InsertQuantOp(
          op.name,
          'activation_' + op.type + '_quant',
          op,
          consumers,
          is_training,
          moving_avg=True,
          ema_decay=ema_decay,
          quant_delay=quant_delay,
          vars_collection=vars_collection,
          bits=activation_bits,
          producer_scope=scope)
def _CheckIfQuantizableOp(src_op, quantized_ops):
  """Check if the output of an op should be quantized.

  Args:
    src_op: op to be checked
    quantized_ops: Set of previously quantized activation ops.

  Returns:
    Boolean specifying if output should be quantized or not.
  """
  if src_op in quantized_ops:
    return False
  if src_op.type not in _VALID_SRC_OP:
    return False

  # If src op is an add or a mul and the output is immediately
  # followed by an activation skip
  if len(src_op.outputs) == 1 and len(src_op.outputs[0].consumers()) == 1:
    follower = src_op.outputs[0].consumers()[0]
    if follower.type in _VALID_ACTIVATION_OP:
      logging.info('Skipping quant after %s', src_op.name)
      return False

  # Is an Add or a Mul: walk each input back through pass-through ops and
  # look for evidence that it is already quantized.
  for tensor in src_op.inputs:
    walker = tensor.op
    while walker.type in _PASS_THROUGH_OP:
      # Walk back through pass through ops
      walker = walker.inputs[0].op
    # Now at a valid or quantizable op, need to check if
    # atleast one of the inputs to a valid op is connected
    # to a quantizable op via pass through ops
    if (walker.type in _QUANTIZATION_OP or
        walker.name.find('delayed_quant/Merge') > 0):
      return True
    if walker.type in _INTERMEDIATE_OP:
      # Check if atleast one input to intermediate_op are quantizable
      for upstream in walker.inputs:
        if _CheckIfQuantizableOp(upstream.op, quantized_ops):
          return True
  return False
def _FindLayersToQuantize(graph):
  """Matches layers in graph to quantize.

  The following patterns get matched. Nodes surrounded by [] will be
  optionally matched:

          weight|folded_weight
                /
         conv|fc
            |
    [batch_to_space_nd]
            |
    [post_conv_correction]
            |
    [biasadd|folded_bias]
            |
         [bypass]
            |
        activation
            |
   [post_activation_bypass]

  Match replacements:
    If weight|folded_weight is found, FakeQuant is added afterwards.
    If bypass is found, FakeQuant is added before and after.
    If activation is found, FakeQuant is added afterwards.
    If post_activation_bypass is found, FakeQuant is added afterwards.

  Args:
    graph: Graph to perform match on.

  Returns:
    list of _LayerMatches.
  """
  input_pattern = graph_matcher.OpTypePattern('*')
  weight_var_pattern = graph_matcher.OpTypePattern('Variable|VariableV2')
  # Partitioned variables surface through an Identity (and possibly a
  # ConcatV2 of per-partition Identities).
  weight_partition_identity_pattern = graph_matcher.OpTypePattern(
      'Identity', inputs=[weight_var_pattern])
  weight_partition_concat_pattern = graph_matcher.OpTypePattern(
      'ConcatV2', inputs=[weight_partition_identity_pattern, '*', '*'])
  weight_identity_pattern = graph_matcher.OpTypePattern(
      'Identity',
      inputs=[
          graph_matcher.OneofPattern([
              weight_partition_identity_pattern,
              weight_partition_concat_pattern,
              weight_var_pattern,
          ])
      ])
  weight_resource_var_pattern = graph_matcher.OpTypePattern('ReadVariableOp')
  folded_weight_pattern = graph_matcher.OpTypePattern('Mul')

  # The weights inputs to the layer operation can either be from the Variable or
  # the folded weight (Mul).
  layer_pattern = graph_matcher.OpTypePattern(
      '|'.join(_QUANTIZABLE_TYPES),
      inputs=[
          input_pattern,
          graph_matcher.OneofPattern([
              weight_identity_pattern, weight_resource_var_pattern,
              folded_weight_pattern
          ])
      ],
      ordered_inputs=False)

  # For atrous convolutions a BatchToSpaceND will occur after the depthwise
  # convolution.
  batch_to_space_pattern = graph_matcher.OpTypePattern(
      'BatchToSpaceND',
      inputs=[
          layer_pattern,
          graph_matcher.OpTypePattern('*'),
          graph_matcher.OpTypePattern('*')
      ])

  layer_output_pattern = graph_matcher.OneofPattern(
      [batch_to_space_pattern, layer_pattern])

  # For separable convolutions, we are looking for a conv, followed by a conv
  # with no activations between the two.
  sep_conv_pattern = graph_matcher.OpTypePattern(
      '|'.join(_QUANTIZABLE_TYPES),
      inputs=[
          graph_matcher.OneofPattern([layer_output_pattern]),
          graph_matcher.OpTypePattern('*')
      ],
      ordered_inputs=False)
  folded_bias_mul_pattern = graph_matcher.OpTypePattern(
      'Mul',
      inputs=[graph_matcher.OpTypePattern('*'), layer_output_pattern],
      ordered_inputs=False)
  post_layer_op_correction_pattern = graph_matcher.OpTypePattern(
      'Add',
      inputs=[folded_bias_mul_pattern,
              graph_matcher.OpTypePattern('*')],
      ordered_inputs=False)
  folded_bias_add_pattern = graph_matcher.OpTypePattern(
      'Add',
      inputs=[
          post_layer_op_correction_pattern,
          graph_matcher.OpTypePattern('*')
      ],
      ordered_inputs=False)

  # batch_norms with forced updates have an Identity operation at the end.
  # TODO(suharshs): Find a way to easily skip extra Identity operations. The
  # current issue is that doing so can often match patterns across many layers
  # incorrectly.
  batch_norm_identity = graph_matcher.OpTypePattern(
      'Identity', inputs=[folded_bias_add_pattern])

  bias_add_pattern = graph_matcher.OpTypePattern(
      'Add|BiasAdd', inputs=[layer_output_pattern, '*'], ordered_inputs=False)

  # The bias can come from the bias add or the folded bias add.
  bypass_pattern = graph_matcher.OpTypePattern(
      'Add',
      inputs=[
          graph_matcher.OneofPattern(
              [bias_add_pattern, folded_bias_add_pattern, batch_norm_identity]),
          '*'
      ],
      ordered_inputs=False)

  # The input to the activation can come from bias add, fold bias add, the
  # bypasses.
  # TODO(suharshs): We should ideally skip Identity operations instead of
  # treating them as activations.
  activation_pattern = graph_matcher.OpTypePattern(
      '|'.join(_ACTIVATION_TYPES) + '|Identity',
      inputs=[
          graph_matcher.OneofPattern([
              bias_add_pattern,
              folded_bias_add_pattern,
              batch_norm_identity,
              bypass_pattern,
              layer_pattern,
          ])
      ])

  post_activation_bypass_pattern = graph_matcher.OpTypePattern(
      'Add', inputs=['*', activation_pattern], ordered_inputs=False)

  # The order of the following matching blocks is very important. Since matches
  # aren't guaranteed to be disjoint, we structure matches from largest to
  # smallest to guarantee that the largest match always wins. Additionally, we
  # ensure that we don't match layers multiple times.
  layer_matches = []
  # We use matched_layer_set to ensure that layers aren't matched multiple
  # times.
  matched_layer_set = set()

  # First, we match layers that have a post activation bypass. We do this first
  # to ensure we don't match only the first part of this layer, missing the
  # post activation bypass node.
  post_activation_bypass_layer_matcher = graph_matcher.GraphMatcher(
      post_activation_bypass_pattern)
  for match_result in post_activation_bypass_layer_matcher.match_graph(graph):
    layer_op = match_result.get_op(layer_pattern)
    # The weight tensor may have matched any one of the three weight patterns.
    weight_tensor = match_result.get_tensor(weight_identity_pattern)
    if weight_tensor is None:
      weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
    if weight_tensor is None:
      weight_tensor = match_result.get_tensor(folded_weight_pattern)
    activation_op = match_result.get_op(activation_pattern)
    bias_add_op = match_result.get_op(bias_add_pattern)
    if bias_add_op is None:
      bias_add_op = match_result.get_op(folded_bias_add_pattern)
    bypass_op = match_result.get_op(bypass_pattern)
    post_activation_bypass_op = match_result.get_op(
        post_activation_bypass_pattern)
    if layer_op not in matched_layer_set:
      matched_layer_set.add(layer_op)
      layer_matches.append(
          _LayerMatch(layer_op, weight_tensor, activation_op, bypass_op,
                      post_activation_bypass_op, bias_add_op))

  # Now, we match the basic layer ending at an activation. We may get duplicate
  # matches from above, but we don't add them to layer_matches.
  layer_matcher = graph_matcher.GraphMatcher(activation_pattern)
  for match_result in layer_matcher.match_graph(graph):
    layer_op = match_result.get_op(layer_pattern)
    weight_tensor = match_result.get_tensor(weight_identity_pattern)
    if weight_tensor is None:
      weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
    if weight_tensor is None:
      weight_tensor = match_result.get_tensor(folded_weight_pattern)
    activation_op = match_result.get_op(activation_pattern)
    bias_add_op = match_result.get_op(bias_add_pattern)
    if bias_add_op is None:
      bias_add_op = match_result.get_op(folded_bias_add_pattern)
    bypass_op = match_result.get_op(bypass_pattern)
    if layer_op not in matched_layer_set:
      if not _IsSkipLayer(activation_op):
        matched_layer_set.add(layer_op)
        layer_matches.append(
            _LayerMatch(layer_op, weight_tensor, activation_op, bypass_op, None,
                        bias_add_op))

  # Match the final layer, where there may not be an activation and instead
  # the output of the final BiasAdd must be quantized. So we treat the BiasAdd
  # as the 'activation_op' in the _LayerMatch, to ensure that it's output is
  # quantized.
  final_layer_matcher = graph_matcher.GraphMatcher(
      graph_matcher.OneofPattern([bias_add_pattern, folded_bias_add_pattern]))
  for match_result in final_layer_matcher.match_graph(graph):
    layer_op = match_result.get_op(layer_pattern)
    weight_tensor = match_result.get_tensor(weight_identity_pattern)
    if weight_tensor is None:
      weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
    if weight_tensor is None:
      weight_tensor = match_result.get_tensor(folded_weight_pattern)
    activation_op = match_result.get_op(bias_add_pattern)
    if activation_op is None:
      activation_op = match_result.get_op(folded_bias_add_pattern)
    if layer_op not in matched_layer_set:
      matched_layer_set.add(layer_op)
      layer_matches.append(
          _LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))

  # Look for separable convolutions here
  sep_conv_matcher = graph_matcher.GraphMatcher(sep_conv_pattern)
  for match_result in sep_conv_matcher.match_graph(graph):
    layer_op = match_result.get_op(layer_pattern)
    weight_tensor = match_result.get_tensor(weight_identity_pattern)
    if weight_tensor is None:
      weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
    activation_op = match_result.get_op(layer_pattern)
    if layer_op not in matched_layer_set:
      matched_layer_set.add(layer_op)
      layer_matches.append(
          _LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))

  return layer_matches
def _IsSkipLayer(activation_op):
"""Skip quantizing conv->identity->Batch norm layers.
Args:
activation_op: Activation op detected by layer matching pattern
Returns:
skip_layer: boolean, true when conv->identity->batch norm is detected.
"""
# Exclude quantization of conv->identity->BN,
# After folding, this part corresponds to estimation of mean and variance
# and should not be quantized.
skip_layer = False
if activation_op.type == 'Identity' and len(activation_op.outputs) == 1:
if len(activation_op.outputs[0].consumers()) == 1:
consumer = activation_op.outputs[0].consumers()[0]
if consumer.type in ['FusedBatchNorm', 'FusedBatchNormV3']:
skip_layer = True
logging.info(
'Skipping quantizing %s, because it is the output of a conv/fc '
'followed by a identity, feeding a fused batch norm.',
activation_op.name)
return skip_layer
class _LayerMatch(object):
"""Contains all information related to a matched Layer."""
def __init__(self, layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op):
self._layer_op = layer_op
self._weight_tensor = weight_tensor
self._activation_op = activation_op
self._bypass_op = bypass_op
self._post_activation_bypass_op = post_activation_bypass_op
self._bias_add_op = bias_add_op
@property
def layer_op(self):
return self._layer_op
@property
def weight_tensor(self):
return self._weight_tensor
@property
def activation_op(self):
return self._activation_op
@property
def bypass_op(self):
return self._bypass_op
@property
def post_activation_bypass_op(self):
return self._post_activation_bypass_op
@property
def bias_add_op(self):
return self._bias_add_op
def _FollowedByFakeQuant(tensor):
"""Returns True if the tensor is followed by a FakeQuant."""
fake_quant_ops = set([
'FakeQuantWithMinMaxVars', 'FakeQuantWithMinMaxArgs',
'FakeQuantWithMinMaxVarsPerChannel'
])
pass_through_ops = set(['Reshape', 'Identity'])
consumers = tensor.consumers()
while consumers:
c = consumers.pop()
if c.type in fake_quant_ops:
return True
elif c.type in pass_through_ops:
for output in c.outputs:
consumers.extend(output.consumers())
return False
def _InsertQuantOp(context,
                   name,
                   producer,
                   consumers,
                   is_training,
                   moving_avg=True,
                   init_min=-6.0,
                   init_max=6.0,
                   bits=8,
                   symmetric=False,
                   ema_decay=0.999,
                   quant_delay=None,
                   vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
                   narrow_range=False,
                   producer_scope=None,
                   consumer_scope=None):
  """Inserts a quant op between a producer op and (multiple) consumer ops.

  Args:
    context: Context where producer and consumer operations are nested.
    name: Name for the new quantization op within the context.
    producer: Producer operation of the pairs where quantization will be
      inserted.
    consumers: Consumer operations of the pairs.
    is_training: Whether quantizing training graph or eval graph.
    moving_avg: Specifies whether to use exponential moving average or just
      the last value seen.
    init_min: Starting minimum value for the new quantization op.
    init_max: Starting maximum value for the new quantization op.
    bits: Number of bits to use for quantization, must be between 2 and 8.
    symmetric: (Optional) If true, use symmetric quantization limits instead of
      training the minimum and maximum of each quantization range separately.
    ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
      quantization intervals for quantizing activations (see here about EMA:
      https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
    quant_delay: (Optional, default None) Int, count of global steps for which
      to delay quantization. This helps weights stabilize at the start of
      training.
    vars_collection: (Optional) Collection where to store the variables for
      quantization interval ends.
    narrow_range: Whether to use the narrow quantization range
      [1; 2^bits - 1] or wide range [0; 2^bits - 1].
    producer_scope: The restriction of producer scope. If not None, the new op
      will be inserted only when the producer is in this scope.
    consumer_scope: The restriction of consumer scope. If not None, the new op
      will be inserted only when all the consumers are in this scope.

  Raises:
    ValueError: When producer operation is not directly connected to the
      consumer operation.
  """
  if producer_scope and not producer.name.startswith(producer_scope):
    logging.info(
        '_InsertQuantOp ignores context="%s" name="%s" '
        'because producer "%s" is not in scope "%s"',
        context, name, producer.name, producer_scope)
    return

  if consumer_scope:
    consumers_in_scope = []
    for consumer in consumers:
      if consumer.name.startswith(consumer_scope):
        consumers_in_scope.append(consumer)
      else:
        # A single out-of-scope consumer aborts the insertion entirely rather
        # than quantizing for the remaining in-scope consumers.
        logging.info(
            '_InsertQuantOp context="%s" name="%s" ignores '
            'consumer "%s" because it is not in scope "%s"',
            context, name, consumer.name, consumer_scope)
        return
    consumers = consumers_in_scope

  name_prefix = _AddContextToName(context, name)
  # This is needed on TPU where name_scope == 'TPUReplicate/loop', and
  # name_prefix starts with 'TPUReplicate/loop/'; without dropping it
  # variables are created as TPUReplicate/loop/TPUReplicate/loop/..., which
  # breaks things later.
  name_scope = ops.get_name_scope()
  if name_scope:
    name_prefix = common.DropStringPrefix(name_prefix, name_scope + '/')

  # Only the producer's first output tensor is quantized.
  inputs = producer.outputs[0]
  # Prevent ops from being quantized multiple times. Bypass ops can sometimes
  # overlap between multiple matches, so we need to ensure that we don't
  # add duplicate FakeQuant operations.
  if _FollowedByFakeQuant(inputs):
    return

  if moving_avg:
    quant = (
        quant_ops.MovingAvgQuantize(
            inputs,
            init_min=init_min,
            init_max=init_max,
            ema_decay=ema_decay,
            is_training=is_training,
            num_bits=bits,
            symmetric=symmetric,
            narrow_range=narrow_range,
            vars_collection=vars_collection,
            name_prefix=name_prefix))
  else:
    quant = (
        quant_ops.LastValueQuantize(
            inputs,
            init_min=init_min,
            init_max=init_max,
            is_training=is_training,
            num_bits=bits,
            symmetric=symmetric,
            narrow_range=narrow_range,
            vars_collection=vars_collection,
            name_prefix=name_prefix))

  # Until quant_delay global steps have passed, pass the input through
  # unquantized via a cond on the quantization step counter.
  if quant_delay and quant_delay > 0:
    activate_quant = math_ops.greater_equal(
        common.CreateOrGetQuantizationStep(),
        quant_delay,
        name=name_prefix + '/activate_quant')
    quant = control_flow_ops.cond(
        activate_quant,
        lambda: quant,
        lambda: inputs,
        name=name_prefix + '/delayed_quant')

  if consumers:
    tensors_modified_count = common.RerouteTensor(
        quant, inputs, can_modify=consumers)
    # Some operations can have multiple output tensors going to the same
    # consumer. Since consumers is a set, we need to ensure that
    # tensors_modified_count is greater than or equal to the length of the set
    # of consumers.
    if tensors_modified_count < len(consumers):
      raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
          [consumer.name for consumer in consumers]))
def _GetContextFromOp(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _AddContextToName(context, name):
"""Adds the context to the name if it exists."""
if not context:
return name
return context + '/' + name
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.template}
"""
from cStringIO import StringIO
from zope.interface.verify import verifyObject
from twisted.internet.defer import succeed, gatherResults
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from twisted.web.template import (
Element, TagLoader, renderer, tags, XMLFile, XMLString)
from twisted.web.iweb import ITemplateLoader
from twisted.web.error import (FlattenerError, MissingTemplateLoader,
MissingRenderMethod)
from twisted.web.template import renderElement
from twisted.web._element import UnexposedMethodError
from twisted.web.test._util import FlattenTestCase
from twisted.web.test.test_web import DummyRequest
from twisted.web.server import NOT_DONE_YET
class TagFactoryTests(TestCase):
    """
    Tests for L{_TagFactory} through the publicly-exposed L{tags} object.
    """

    def test_lookupTag(self):
        """
        An ordinary HTML tag is available as an attribute of C{tags}.
        """
        self.assertEqual(tags.a.tagName, "a")

    def test_lookupHTML5Tag(self):
        """
        Tags introduced by the HTML5 specification are also available
        through C{tags}.
        """
        self.assertEqual(tags.video.tagName, "video")

    def test_lookupTransparentTag(self):
        """
        The special transparent tag, which supports inclusion in templates
        without emitting any markup of its own, is reachable via the
        C{transparent} attribute and has an empty tag name.
        """
        self.assertEqual(tags.transparent.tagName, "")

    def test_lookupInvalidTag(self):
        """
        Accessing a name which is not part of HTML raises
        L{AttributeError}.
        """
        self.assertRaises(AttributeError, getattr, tags, "invalid")

    def test_lookupXMP(self):
        """
        As a special case, the <xmp> tag is deliberately unavailable through
        C{tags} or any other part of the templating machinery, so accessing
        it raises L{AttributeError}.
        """
        self.assertRaises(AttributeError, getattr, tags, "xmp")
class ElementTests(TestCase):
    """
    Tests for L{Element}.
    """
    def test_missingTemplateLoader(self):
        """
        L{Element.render} raises L{MissingTemplateLoader} if the C{loader}
        attribute is C{None}.
        """
        element = Element()
        err = self.assertRaises(MissingTemplateLoader, element.render, None)
        # The exception carries the offending element for debugging.
        self.assertIdentical(err.element, element)

    def test_missingTemplateLoaderRepr(self):
        """
        A L{MissingTemplateLoader} instance can be repr()'d without error.
        """
        class PrettyReprElement(Element):
            def __repr__(self):
                return 'Pretty Repr Element'
        self.assertIn('Pretty Repr Element',
                      repr(MissingTemplateLoader(PrettyReprElement())))

    def test_missingRendererMethod(self):
        """
        When called with a name which is not associated with a render method,
        L{Element.lookupRenderMethod} raises L{MissingRenderMethod}.
        """
        element = Element()
        err = self.assertRaises(
            MissingRenderMethod, element.lookupRenderMethod, "foo")
        # Both the element and the missing name are recorded on the error.
        self.assertIdentical(err.element, element)
        self.assertEqual(err.renderName, "foo")

    def test_missingRenderMethodRepr(self):
        """
        A L{MissingRenderMethod} instance can be repr()'d without error.
        """
        class PrettyReprElement(Element):
            def __repr__(self):
                return 'Pretty Repr Element'
        s = repr(MissingRenderMethod(PrettyReprElement(),
                                     'expectedMethod'))
        self.assertIn('Pretty Repr Element', s)
        self.assertIn('expectedMethod', s)

    def test_definedRenderer(self):
        """
        When called with the name of a defined render method,
        L{Element.lookupRenderMethod} returns that render method.
        """
        class ElementWithRenderMethod(Element):
            @renderer
            def foo(self, request, tag):
                return "bar"
        foo = ElementWithRenderMethod().lookupRenderMethod("foo")
        self.assertEqual(foo(None, None), "bar")

    def test_render(self):
        """
        L{Element.render} loads a document from the C{loader} attribute and
        returns it.
        """
        class TemplateLoader(object):
            def load(self):
                return "result"
        class StubElement(Element):
            loader = TemplateLoader()
        element = StubElement()
        self.assertEqual(element.render(None), "result")

    def test_misuseRenderer(self):
        """
        If the L{renderer} decorator is called without any arguments, it will
        raise a comprehensible exception.
        """
        te = self.assertRaises(TypeError, renderer)
        # NOTE(review): this pins a CPython-2-specific TypeError message.
        self.assertEqual(str(te),
                         "expose() takes at least 1 argument (0 given)")

    def test_renderGetDirectlyError(self):
        """
        Called directly, without a default, L{renderer.get} raises
        L{UnexposedMethodError} when it cannot find a renderer.
        """
        self.assertRaises(UnexposedMethodError, renderer.get, None,
                          "notARenderer")
class XMLFileReprTests(TestCase):
    """
    Tests for L{twisted.web.template.XMLFile}'s C{__repr__}.
    """

    def test_filePath(self):
        """
        The repr() of an L{XMLFile} constructed from a L{FilePath} identifies
        that path.
        """
        fp = FilePath("/tmp/fake.xml")
        self.assertEqual(repr(XMLFile(fp)), '<XMLFile of %r>' % (fp,))

    def test_filename(self):
        """
        The repr() of an L{XMLFile} constructed from a filename identifies
        that filename.
        """
        filename = "/tmp/fake.xml"
        self.assertEqual(
            repr(XMLFile(filename)), '<XMLFile of %r>' % (filename,))

    def test_file(self):
        """
        The repr() of an L{XMLFile} constructed from a file object identifies
        that object.
        """
        fileObj = StringIO("not xml")
        self.assertEqual(
            repr(XMLFile(fileObj)), '<XMLFile of %r>' % (fileObj,))
class XMLLoaderTestsMixin(object):
    """
    Mixin exercising behaviour common to the XML template loaders.

    @ivar loaderFactory: no-argument callable returning the loader under test;
        supplied by the concrete subclass.
    @ivar templateString: Simple template to use to exercise the loaders.
    @ivar deprecatedUse: C{True} if this use of L{XMLFile} is deprecated and
        should emit a C{DeprecationWarning}.
    """
    loaderFactory = None
    templateString = '<p>Hello, world.</p>'

    def test_load(self):
        """
        Verify that the loader returns a tag with the correct children, and
        that deprecated usages emit exactly one L{DeprecationWarning}.
        """
        loader = self.loaderFactory()
        # The template holds a single top-level tag; unpack it.
        tag, = loader.load()

        warnings = self.flushWarnings(offendingFunctions=[self.loaderFactory])
        if self.deprecatedUse:
            self.assertEqual(len(warnings), 1)
            self.assertEqual(warnings[0]['category'], DeprecationWarning)
            self.assertEqual(
                warnings[0]['message'],
                "Passing filenames or file objects to XMLFile is "
                "deprecated since Twisted 12.1. Pass a FilePath instead.")
        else:
            self.assertEqual(len(warnings), 0)

        self.assertEqual(tag.tagName, 'p')
        self.assertEqual(tag.children, [u'Hello, world.'])

    def test_loadTwice(self):
        """
        If C{load} is called on a loader twice the result should be the
        same.
        """
        loader = self.loaderFactory()
        tags1 = loader.load()
        tags2 = loader.load()
        self.assertEqual(tags1, tags2)
class XMLStringLoaderTests(TestCase, XMLLoaderTestsMixin):
    """
    Tests for L{twisted.web.template.XMLString}
    """
    deprecatedUse = False

    def loaderFactory(self):
        """
        @return: an L{XMLString} loader built directly from
            C{self.templateString}.
        """
        return XMLString(self.templateString)
class XMLFileWithFilePathTests(TestCase, XMLLoaderTestsMixin):
    """
    Tests for L{twisted.web.template.XMLFile}'s L{FilePath} support.
    """
    deprecatedUse = False

    def loaderFactory(self):
        """
        @return: an L{XMLFile} constructed with a L{FilePath} pointing to a
            file that contains C{self.templateString}.
        """
        fp = FilePath(self.mktemp())
        fp.setContent(self.templateString)
        return XMLFile(fp)
class XMLFileWithFileTests(TestCase, XMLLoaderTestsMixin):
    """
    Tests for L{twisted.web.template.XMLFile}'s deprecated file object support.
    """
    deprecatedUse = True

    def loaderFactory(self):
        """
        @return: an L{XMLFile} constructed with a file object that contains
            C{self.templateString}.
        """
        return XMLFile(StringIO(self.templateString))
class XMLFileWithFilenameTests(TestCase, XMLLoaderTestsMixin):
    """
    Tests for L{twisted.web.template.XMLFile}'s deprecated filename support.
    """
    deprecatedUse = True

    def loaderFactory(self):
        """
        @return: an L{XMLFile} constructed with a filename that points to a
            file containing C{self.templateString}.
        """
        fp = FilePath(self.mktemp())
        fp.setContent(self.templateString)
        return XMLFile(fp.path)
class FlattenIntegrationTests(FlattenTestCase):
    """
    Tests for integration between L{Element} and
    L{twisted.web._flatten.flatten}.
    """
    def test_roundTrip(self):
        """
        Given a series of parsable XML strings, verify that
        L{twisted.web._flatten.flatten} will flatten the L{Element} back to the
        input when sent on a round trip.
        """
        fragments = [
            "<p>Hello, world.</p>",
            "<p><!-- hello, world --></p>",
            "<p><![CDATA[Hello, world.]]></p>",
            '<test1 xmlns:test2="urn:test2">'
            '<test2:test3></test2:test3></test1>',
            '<test1 xmlns="urn:test2"><test3></test3></test1>',
            '<p>\xe2\x98\x83</p>',
        ]
        deferreds = [
            self.assertFlattensTo(Element(loader=XMLString(xml)), xml)
            for xml in fragments]
        return gatherResults(deferreds)

    def test_entityConversion(self):
        """
        When flattening an HTML entity, it should flatten out to the utf-8
        representation if possible.
        """
        element = Element(loader=XMLString('<p>&#9731;</p>'))
        return self.assertFlattensTo(element, '<p>\xe2\x98\x83</p>')

    def test_missingTemplateLoader(self):
        """
        Rendering an Element without a loader attribute raises the appropriate
        exception.
        """
        return self.assertFlatteningRaises(Element(), MissingTemplateLoader)

    def test_missingRenderMethod(self):
        """
        Flattening an L{Element} with a C{loader} which has a tag with a render
        directive fails with L{FlattenerError} if there is no available render
        method to satisfy that directive.
        """
        element = Element(loader=XMLString("""
        <p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
            t:render="unknownMethod" />
        """))
        return self.assertFlatteningRaises(element, MissingRenderMethod)

    def test_transparentRendering(self):
        """
        A C{transparent} element should be eliminated from the DOM and rendered
        as only its children.
        """
        element = Element(loader=XMLString(
            '<t:transparent '
            'xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
            'Hello, world.'
            '</t:transparent>'
        ))
        return self.assertFlattensTo(element, "Hello, world.")

    def test_attrRendering(self):
        """
        An Element with an attr tag renders the value of its attr tag as an
        attribute of its containing tag.
        """
        element = Element(loader=XMLString(
            '<a xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
            '<t:attr name="href">http://example.com</t:attr>'
            'Hello, world.'
            '</a>'
        ))
        return self.assertFlattensTo(element,
                                     '<a href="http://example.com">Hello, world.</a>')

    def test_errorToplevelAttr(self):
        """
        A template with a toplevel C{attr} tag will not load; it will raise
        L{AssertionError} if you try.
        """
        self.assertRaises(
            AssertionError,
            XMLString,
            """<t:attr
            xmlns:t='http://twistedmatrix.com/ns/twisted.web.template/0.1'
            name='something'
            >hello</t:attr>
            """)

    def test_errorUnnamedAttr(self):
        """
        A template with an C{attr} tag with no C{name} attribute will not load;
        it will raise L{AssertionError} if you try.
        """
        self.assertRaises(
            AssertionError,
            XMLString,
            """<html><t:attr
            xmlns:t='http://twistedmatrix.com/ns/twisted.web.template/0.1'
            >hello</t:attr></html>""")

    def test_lenientPrefixBehavior(self):
        """
        If the parser sees a prefix it doesn't recognize on an attribute, it
        will pass it on through to serialization.
        """
        theInput = (
            '<hello:world hello:sample="testing" '
            'xmlns:hello="http://made-up.example.com/ns/not-real">'
            'This is a made-up tag.</hello:world>')
        element = Element(loader=XMLString(theInput))
        # Bug fix: return the Deferred so trial waits for the flattening
        # result; previously the assertion could never fail the test.
        return self.assertFlattensTo(element, theInput)

    def test_deferredRendering(self):
        """
        An Element with a render method which returns a Deferred will render
        correctly.
        """
        class RenderfulElement(Element):
            @renderer
            def renderMethod(self, request, tag):
                return succeed("Hello, world.")
        element = RenderfulElement(loader=XMLString("""
        <p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
            t:render="renderMethod">
        Goodbye, world.
        </p>
        """))
        return self.assertFlattensTo(element, "Hello, world.")

    def test_loaderClassAttribute(self):
        """
        If there is a non-None loader attribute on the class of an Element
        instance but none on the instance itself, the class attribute is used.
        """
        class SubElement(Element):
            loader = XMLString("<p>Hello, world.</p>")
        return self.assertFlattensTo(SubElement(), "<p>Hello, world.</p>")

    def test_directiveRendering(self):
        """
        An Element with a valid render directive has that directive invoked and
        the result added to the output.
        """
        renders = []
        class RenderfulElement(Element):
            @renderer
            def renderMethod(self, request, tag):
                renders.append((self, request))
                return tag("Hello, world.")
        element = RenderfulElement(loader=XMLString("""
        <p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
            t:render="renderMethod" />
        """))
        return self.assertFlattensTo(element, "<p>Hello, world.</p>")

    def test_directiveRenderingOmittingTag(self):
        """
        An Element with a render method which omits the containing tag
        successfully removes that tag from the output.
        """
        class RenderfulElement(Element):
            @renderer
            def renderMethod(self, request, tag):
                return "Hello, world."
        element = RenderfulElement(loader=XMLString("""
        <p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
            t:render="renderMethod">
        Goodbye, world.
        </p>
        """))
        return self.assertFlattensTo(element, "Hello, world.")

    def test_elementContainingStaticElement(self):
        """
        An Element which is returned by the render method of another Element is
        rendered properly.
        """
        class RenderfulElement(Element):
            @renderer
            def renderMethod(self, request, tag):
                return tag(Element(
                    loader=XMLString("<em>Hello, world.</em>")))
        element = RenderfulElement(loader=XMLString("""
        <p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
            t:render="renderMethod" />
        """))
        return self.assertFlattensTo(element, "<p><em>Hello, world.</em></p>")

    def test_elementUsingSlots(self):
        """
        An Element which is returned by the render method of another Element is
        rendered properly.
        """
        class RenderfulElement(Element):
            @renderer
            def renderMethod(self, request, tag):
                return tag.fillSlots(test2='world.')
        element = RenderfulElement(loader=XMLString(
            '<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"'
            ' t:render="renderMethod">'
            '<t:slot name="test1" default="Hello, " />'
            '<t:slot name="test2" />'
            '</p>'
        ))
        return self.assertFlattensTo(element, "<p>Hello, world.</p>")

    def test_elementContainingDynamicElement(self):
        """
        Directives in the document factory of an Element returned from a render
        method of another Element are satisfied from the correct object: the
        "inner" Element.
        """
        class OuterElement(Element):
            @renderer
            def outerMethod(self, request, tag):
                return tag(InnerElement(loader=XMLString("""
                <t:ignored
                    xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
                    t:render="innerMethod" />
                """)))
        class InnerElement(Element):
            @renderer
            def innerMethod(self, request, tag):
                return "Hello, world."
        element = OuterElement(loader=XMLString("""
        <p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
            t:render="outerMethod" />
        """))
        return self.assertFlattensTo(element, "<p>Hello, world.</p>")

    def test_sameLoaderTwice(self):
        """
        Rendering the output of a loader, or even the same element, should
        return different output each time.
        """
        sharedLoader = XMLString(
            '<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
            '<t:transparent t:render="classCounter" /> '
            '<t:transparent t:render="instanceCounter" />'
            '</p>')
        class DestructiveElement(Element):
            count = 0
            instanceCount = 0
            loader = sharedLoader
            @renderer
            def classCounter(self, request, tag):
                DestructiveElement.count += 1
                return tag(str(DestructiveElement.count))
            @renderer
            def instanceCounter(self, request, tag):
                self.instanceCount += 1
                return tag(str(self.instanceCount))
        e1 = DestructiveElement()
        e2 = DestructiveElement()
        self.assertFlattensImmediately(e1, "<p>1 1</p>")
        self.assertFlattensImmediately(e1, "<p>2 2</p>")
        self.assertFlattensImmediately(e2, "<p>3 1</p>")
class TagLoaderTests(FlattenTestCase):
    """
    Tests for L{TagLoader}.
    """

    def setUp(self):
        self.loader = TagLoader(tags.i('test'))

    def test_interface(self):
        """
        An instance of L{TagLoader} provides L{ITemplateLoader}.
        """
        self.assertTrue(verifyObject(ITemplateLoader, self.loader))

    def test_loadsList(self):
        """
        L{TagLoader.load} returns a list, per L{ITemplateLoader}.
        """
        self.assertIsInstance(self.loader.load(), list)

    def test_flatten(self):
        """
        L{TagLoader} can be used in an L{Element}, and flattens as the tag used
        to construct the L{TagLoader} would flatten.
        """
        element = Element(self.loader)
        self.assertFlattensImmediately(element, '<i>test</i>')
class TestElement(Element):
    """
    An L{Element} that can be rendered successfully.
    """
    # Static template with no render directives, so flattening cannot fail.
    loader = XMLString(
        '<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
        'Hello, world.'
        '</p>')
class TestFailureElement(Element):
    """
    An L{Element} that can be used in place of L{FailureElement} to verify
    that L{renderElement} can render failures properly.
    """
    loader = XMLString(
        '<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
        'I failed.'
        '</p>')

    def __init__(self, failure, loader=None):
        # NOTE(review): the loader argument only mirrors FailureElement's
        # signature; it is intentionally ignored so the class-level template
        # above is always used, and the base __init__ is deliberately skipped.
        self.failure = failure
class FailingElement(Element):
    """
    An element that raises an exception when rendered.
    """
    def render(self, request):
        # Deliberately divide by zero so rendering always fails with
        # ZeroDivisionError.
        numerator = 42
        denominator = 0
        return numerator / denominator
class FakeSite(object):
    """
    A minimal L{Site} object that we can use to test displayTracebacks
    """
    # Mirrors Site.displayTracebacks; tests flip this to exercise the
    # traceback-rendering path of renderElement.
    displayTracebacks = False
class TestRenderElement(TestCase):
    """
    Test L{renderElement}.
    """

    def setUp(self):
        """
        Set up a common L{DummyRequest} and L{FakeSite}.
        """
        self.request = DummyRequest([""])
        self.request.site = FakeSite()

    def test_simpleRender(self):
        """
        L{renderElement} returns NOT_DONE_YET and eventually
        writes the rendered L{Element} to the request before finishing the
        request.
        """
        element = TestElement()
        # Rendering is asynchronous; observe completion via notifyFinish().
        d = self.request.notifyFinish()
        def check(_):
            self.assertEqual(
                "".join(self.request.written),
                "<!DOCTYPE html>\n"
                "<p>Hello, world.</p>")
            self.assertTrue(self.request.finished)
        d.addCallback(check)
        self.assertIdentical(NOT_DONE_YET, renderElement(self.request, element))
        return d

    def test_simpleFailure(self):
        """
        L{renderElement} handles failures by writing a minimal
        error message to the request and finishing it.
        """
        element = FailingElement()
        d = self.request.notifyFinish()
        def check(_):
            # The flattening failure is logged; flush it so trial does not
            # fail the test on the logged error.
            flushed = self.flushLoggedErrors(FlattenerError)
            self.assertEqual(len(flushed), 1)
            self.assertEqual(
                "".join(self.request.written),
                ('<!DOCTYPE html>\n'
                 '<div style="font-size:800%;'
                 'background-color:#FFF;'
                 'color:#F00'
                 '">An error occurred while rendering the response.</div>'))
            self.assertTrue(self.request.finished)
        d.addCallback(check)
        self.assertIdentical(NOT_DONE_YET, renderElement(self.request, element))
        return d

    def test_simpleFailureWithTraceback(self):
        """
        L{renderElement} will render a traceback when rendering of
        the element fails and our site is configured to display tracebacks.
        """
        self.request.site.displayTracebacks = True
        element = FailingElement()
        d = self.request.notifyFinish()
        def check(_):
            flushed = self.flushLoggedErrors(FlattenerError)
            self.assertEqual(len(flushed), 1)
            # TestFailureElement substitutes for the real FailureElement.
            self.assertEqual(
                "".join(self.request.written),
                "<!DOCTYPE html>\n<p>I failed.</p>")
            self.assertTrue(self.request.finished)
        d.addCallback(check)
        renderElement(self.request, element, _failElement=TestFailureElement)
        return d

    def test_nonDefaultDoctype(self):
        """
        L{renderElement} will write the doctype string specified by the
        doctype keyword argument.
        """
        element = TestElement()
        d = self.request.notifyFinish()
        def check(_):
            self.assertEqual(
                "".join(self.request.written),
                ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
                 ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
                 '<p>Hello, world.</p>'))
        d.addCallback(check)
        renderElement(
            self.request,
            element,
            doctype=(
                '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
                ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'))
        return d

    def test_noneDoctype(self):
        """
        L{renderElement} will not write out a doctype if the doctype keyword
        argument is C{None}.
        """
        element = TestElement()
        d = self.request.notifyFinish()
        def check(_):
            self.assertEqual(
                "".join(self.request.written),
                '<p>Hello, world.</p>')
        d.addCallback(check)
        renderElement(self.request, element, doctype=None)
        return d
| |
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("FuzzingWampClient",)
from zope.interface import implementer
from zope.interface.verify import verifyObject, verifyClass
from twisted.internet.defer import returnValue, \
inlineCallbacks
import autobahn
import autobahntestsuite
from autobahn.wamp1.protocol import exportRpc, \
WampServerProtocol, \
WampServerFactory
from interfaces import ITestRunner, ITestDb
from rinterfaces import RITestDb, RITestRunner
from testrun import TestRun, Testee
@implementer(ITestRunner)
@implementer(RITestRunner)
class FuzzingWampClient(object):
    """
    A test driver for WAMP test cases.

    The test driver takes a test specification and orchestrates the execution of tests
    against the set of testees (as specified in the test spec).
    """

    MODENAME = 'fuzzingwampclient'

    def __init__(self, testDb, debug = False):
        # The test database must provide both the local and remote-callable
        # interfaces.
        assert(verifyObject(ITestDb, testDb))
        assert(verifyObject(RITestDb, testDb))
        self._testDb = testDb
        self._debug = debug
        # Set externally (e.g. by startWeb) to a WAMP dispatch callable;
        # when None, no PubSub events are published.
        self.dispatch = None

    @exportRpc
    def run(self, specName, saveResults = True):
        # RPC-exposed convenience wrapper around runAndObserve without
        # additional observers.
        return self.runAndObserve(specName, saveResults = saveResults)

    @inlineCallbacks
    def runAndObserve(self, specName, observers_ = [], saveResults = True):
        # Run the named spec, invoking each observer on every finished test
        # case and once more (with testCase=None) at end of each test run.
        # NOTE(review): observers_ has a mutable default; it is copied
        # immediately below, so the default list is never mutated.

        specId, spec = yield self._testDb.getSpecByName(specName)
        casesByTestee = yield self._testDb.generateCasesByTestee(specId)

        _observers = observers_[:]
        #_observers = observers_

        ## publish WAMP event on test case finished
        ##
        def notify(runId, testRun, testCase, result, remaining):
            if testCase:
                evt = {
                    'testee': testRun.testee.name,
                    'runId': runId,
                    'index': testCase.index,
                    'passed': result.passed,
                    'remaining': remaining
                }
                topic = "http://api.testsuite.wamp.ws/testrun#onResult"
            else:
                evt = {
                    'testee': testRun.testee.name,
                    'runId': runId
                }
                topic = "http://api.testsuite.wamp.ws/testrun#onComplete"
            self.dispatch(topic, evt)
            #if result and not result.passed:
            #   print topic, evt

        if self.dispatch:
            _observers.append(notify)

        ## save test results to test database
        ##
        def save(runId, testRun, testCase, result, remaining):
            if testCase:
                self._testDb.saveResult(runId, testRun, testCase, result, saveResults)

        if saveResults:
            _observers.append(save)

        # Build one TestRun per testee; per-testee 'randomize' overrides the
        # spec-level option, which in turn defaults to False.
        testRuns = []
        for obj in spec['testees']:
            testee = Testee(**obj)
            cases = casesByTestee.get(testee.name, [])
            if testee.options.has_key('randomize') and testee.options['randomize'] is not None:
                randomize = testee.options['randomize']
            elif spec.has_key('options') and spec['options'].has_key('randomize') and spec['options']['randomize'] is not None:
                randomize = spec['options']['randomize']
            else:
                randomize = False
            testRun = TestRun(testee, cases, randomize = randomize)
            testRuns.append(testRun)

        runId = yield self._testDb.newRun(specId)

        print
        print "Autobahn Fuzzing WAMP Client"
        print
        print "Autobahn Version : %s" % autobahn.version
        print "AutobahnTestsuite Version : %s" % autobahntestsuite.version
        #print "WAMP Test Cases : %d" % len(self._caseSet.Cases)
        print "WAMP Testees : %d" % len(spec["testees"])
        print
        for testRun in testRuns:
            print "%s @ %s : %d test cases prepared" % (testRun.testee.name, testRun.testee.url, testRun.remaining())
        print
        print

        def progress(runId, testRun, testCase, result, remaining):
            # Fan out to all observers; an observer failure must not abort
            # the whole run, so exceptions are swallowed and printed.
            for obsv in _observers:
                try:
                    obsv(runId, testRun, testCase, result, remaining)
                except Exception, e:
                    print e

        if spec.get('parallel', False):
            fails, resultIds = yield self._runParallel(runId, spec, testRuns, progress)
        else:
            fails, resultIds = yield self._runSequential(runId, spec, testRuns, progress)

        yield self._testDb.closeRun(runId)

        returnValue((runId, resultIds))

    @inlineCallbacks
    def _runSequential(self, runId, spec, testRuns, progress):
        """
        Execute all test runs sequentially - that is for each
        testee (one after another), run the testee's set of
        test cases sequentially.
        """
        ## we cumulate number of test fails and progress() return values
        ##
        fails = 0
        progressResults = []
        for testRun in testRuns:
            while True:
                ## get next test case _class_ for test run
                ##
                TestCase = testRun.next()
                if TestCase:
                    ## run test case, let fire progress() callback and cumulate results
                    ##
                    try:
                        testCase = TestCase(testRun.testee, spec)
                    except Exception, e:
                        print "ERROR 1", e
                    else:
                        try:
                            result = yield testCase.run()
                        except Exception, e:
                            print "ERROR 2", e
                        # NOTE(review): if testCase.run() raised above,
                        # 'result' is unbound here and the next line itself
                        # raises NameError - TODO confirm/fix upstream.
                        if not result.passed:
                            fails += 1
                        pres = yield progress(runId, testRun, testCase, result, testRun.remaining())
                        progressResults.append(pres)
                else:
                    ## signal end of test run by firing progress() one last time ..
                    ##
                    yield progress(runId, testRun, None, None, 0)
                    break
        returnValue((fails, progressResults))

    def _runParallel(self, runId, spec, testRuns, progress):
        """
        Execute all test runs in parallel - that is run
        each testee's set of test cases sequentially
        against that testee, but do so for all testees
        in parallel.
        """
        raise Exception("implement me")
class WsTestWampProtocol(WampServerProtocol):
    """
    WAMP protocol for the testsuite web service: exposes the test database
    and test runner via RPC and opens the testsuite PubSub topic space.
    """
    def onSessionOpen(self):
        # Allow clients to subscribe to any topic under the testsuite URI.
        self.registerForPubSub("http://api.testsuite.wamp.ws", True)
        self.registerForRpc(self.factory._testDb, "http://api.testsuite.wamp.ws/testdb/")
        self.registerForRpc(self.factory._testRunner, "http://api.testsuite.wamp.ws/testrunner/")
class WsTestWampFactory(WampServerFactory):
    """
    WAMP server factory exposing the test database and test runner to
    WsTestWampProtocol instances (via self._testDb / self._testRunner).
    """
    protocol = WsTestWampProtocol

    def __init__(self, testDb, testRunner, url, debug = False):
        """
        :param testDb: Test database (must provide ITestDb).
        :param testRunner: Test runner (must provide ITestRunner).
        :param url: WebSocket URL the factory listens on.
        :param debug: Enable WAMP debug output.
        """
        assert(verifyObject(ITestDb, testDb))
        assert(verifyObject(ITestRunner, testRunner))
        # Bug fix: the debug flags were previously hard-coded to True,
        # silently ignoring the constructor's debug parameter.
        WampServerFactory.__init__(self, url, debug = debug, debugWamp = debug)
        self._testDb = testDb
        self._testRunner = testRunner
@inlineCallbacks
def startFuzzingWampClient(self, specName):
    """
    Start a WAMP fuzzing client test run using a spec previously imported.
    """
    testSet = WampCaseSet()
    testDb = TestDb([testSet])
    testRunner = FuzzingWampClient(testDb)

    # Console observer: print one line per finished test case and a final
    # line when a testee's run completes.
    def progress(runId, testRun, testCase, result, remaining):
        if testCase:
            print "%s - %s %s (%d tests remaining)" % (testRun.testee.name, "PASSED : " if result.passed else "FAILED : ", testCase.__class__.__name__, remaining)
        else:
            print "FINISHED : Test run for testee '%s' ended." % testRun.testee.name

    runId, resultIds = yield testRunner.runAndObserve(specName, [progress])

    print
    print "Tests finished: run ID %s, result IDs %d" % (runId, len(resultIds))
    print

    # Print a per-testee pass/fail summary table.
    summary = yield testDb.getTestRunSummary(runId)
    tab = Tabify(['l32', 'r5', 'r5'])
    print
    print tab.tabify(['Testee', 'Pass', 'Fail'])
    print tab.tabify()
    for t in summary:
        print tab.tabify([t['name'], t['passed'], t['failed']])
    print
def startImportSpec(self, specFilename):
    """
    Import a test specification into the test database.
    """
    specFilename = os.path.abspath(specFilename)
    print "Importing spec from %s ..." % specFilename
    try:
        # NOTE(review): the file handle from open() is never closed here.
        spec = json.loads(open(specFilename).read())
    except Exception, e:
        raise Exception("Error: invalid JSON data - %s" % e)

    ## FIXME: this should allow to import not only WAMP test specs,
    ## but WebSocket test specs as well ..
    testSet = WampCaseSet()
    db = TestDb([testSet])

    def done(res):
        # importSpec resolves to (operation, objectId, specName), where
        # operation is None (unchanged), 'U' (updated) or 'I' (imported).
        op, id, name = res
        if op is None:
            print "Spec under name '%s' already imported and unchanged (Object ID %s)." % (name, id)
        elif op == 'U':
            print "Updated spec under name '%s' (Object ID %s)." % (name, id)
        elif op == 'I':
            print "Imported spec under new name '%s' (Object ID %s)." % (name, id)
        print

    def failed(failure):
        print "Error: spec import failed - %s." % failure.value

    d = db.importSpec(spec)
    d.addCallbacks(done, failed)
    return d
def startExportSpec(self, specName, specFilename = None):
    """
    Export a (currently active, if any) test specification from the test database by name.

    When specFilename is None the spec is written to stdout instead of a file.
    """
    if specFilename:
        specFilename = os.path.abspath(specFilename)
        fout = open(specFilename, 'w')
    else:
        fout = sys.stdout

    testSet = WampCaseSet()
    db = TestDb([testSet])

    def done(res):
        id, spec = res
        data = json.dumps(spec, sort_keys = True, indent = 3, separators = (',', ': '))
        fout.write(data)
        fout.write('\n')
        if specFilename:
            print "Exported spec '%s' to %s." % (specName, specFilename)
            print

    def failed(failure):
        print "Error: spec export failed - %s" % failure.value
        print

    d = db.getSpecByName(specName)
    d.addCallbacks(done, failed)
    return d
def startWeb(self, port = 7070, debug = False):
    """
    Start Web service for test database.

    Serves the Klein web UI, static files under /static and a WAMP
    WebSocket endpoint under /ws, all on the given TCP port.
    """
    app = klein.Klein()
    app.debug = debug
    app.templates = jinja2.Environment(loader = jinja2.FileSystemLoader('autobahntestsuite/templates'))

    app.db = TestDb([WampCaseSet()], debug = debug)
    app.runner = FuzzingWampClient(app.db, debug = debug)

    @app.route('/')
    @inlineCallbacks
    def page_home(request):
        # Landing page: render the 20 most recent test runs.
        testruns = yield app.db.getTestRuns(limit = 20)
        rm = {'fuzzingwampclient': 'WAMP/client'}
        cs = {'wamp': 'WAMP'}
        for tr in testruns:
            started = parseutc(tr['started'])
            ended = parseutc(tr['ended'])
            # Runs still in progress have no end time; use "now" for the
            # duration display.
            endedOrNow = ended if ended else datetime.utcnow()
            duration = (endedOrNow - started).seconds
            tr['duration'] = duration
            if started:
                tr['started'] = pprint_timeago(started)
            if ended:
                tr['ended'] = pprint_timeago(ended)
            if tr['total']:
                tr['failed'] = tr['total'] - tr['passed']
            else:
                tr['failed'] = 0
            tr['runMode'] = rm[tr['runMode']]
            tr['caseSetName'] = cs[tr['caseSetName']]
        page = app.templates.get_template('index.html')
        returnValue(page.render(testruns = testruns))

    @app.route('/testrun/<path:runid>')
    @inlineCallbacks
    def page_show_testrun(*args, **kwargs):
        # Detail page for a single test run.
        runid = kwargs.get('runid', None)
        testees = yield app.db.getTestRunSummary(runid)
        testresults = yield app.db.getTestRunIndex(runid)
        for tr in testresults:
            tr['index'] = "Case " + '.'.join(str(x) for x in tr['index'][0:4])
            for r in tr['results']:
                # Convert seconds to milliseconds for display.
                tr['results'][r]['duration'] *= 1000
        page = app.templates.get_template('testrun.html')
        returnValue(page.render(testees = testees, testresults = testresults))

    @app.route('/testresult/<path:resultid>')
    @inlineCallbacks
    def page_show_testresult(*args, **kwargs):
        # Detail page for a single test result.
        resultid = kwargs.get('resultid', None)
        testresult = yield app.db.getTestResult(resultid)

        # Blank out empty expected/observed tables so the template can skip
        # them entirely.
        n = 0
        for k in testresult.expected:
            n += len(testresult.expected[k])
        if n == 0:
            testresult.expected = None

        n = 0
        for k in testresult.observed:
            n += len(testresult.observed[k])
        if n == 0:
            testresult.observed = None

        testresult.duration = 1000. * (testresult.ended - testresult.started)
        page = app.templates.get_template('testresult.html')
        returnValue(page.render(testresult = testresult))

    @app.route('/home')
    def page_home_deferred_style(request):
        # JSON listing of test runs, written Deferred-callback style.
        d1 = Deferred()
        db = TestDb()
        d2 = db.getTestRuns()
        def process(result):
            res = []
            for row in result:
                obj = {}
                obj['runId'] = row[0]
                obj['mode'] = row[1]
                obj['started'] = row[2]
                obj['ended'] = row[3]
                res.append(obj)
            d1.callback(json.dumps(res))
        d2.addCallback(process)
        return d1

    ## serve status stuff from a standard File resource
    static_resource = File("autobahntestsuite/static")

    ## serve a WAMP server to access the testsuite
    wamp_factory = WsTestWampFactory(app.db, app.runner, "ws://localhost:%d" % port, debug = debug)

    ## we MUST start the factory manually here .. Twisted Web won't
    ## do for us.
    wamp_factory.startFactory()

    ## wire up "dispatch" so that test db/runner can notify
    app.db.dispatch = wamp_factory.dispatch
    app.runner.dispatch = wamp_factory.dispatch

    ## wrap in a Twisted Web resource
    wamp_resource = WebSocketResource(wamp_factory)

    ## we need to wrap our resources, since the Klein Twisted Web resource
    ## does not seem to support putChild(), and we want to have a WebSocket
    ## resource under path "/ws" and our static file serving under "/static"
    root_resource = WSGIRootResource(app.resource(),
        {
            'static': static_resource,
            'ws': wamp_resource
        }
    )

    ## serve everything from one port
    reactor.listenTCP(port, Site(root_resource), interface = "0.0.0.0")

    return True
@inlineCallbacks
def startFuzzingService(self):
    """Run the fuzzing service for the configured mode (Python 2).

    For ``fuzzingwampclient`` this executes the WAMP test suite from the
    loaded spec, prints a per-testee pass/fail summary table and stops
    the reactor. Server-side fuzzing is not implemented.
    """
    spec = self._loadSpec()
    if self.mode == 'fuzzingwampclient':
        testSet = WampCaseSet()
        testDb = TestDb([testSet])
        testRunner = FuzzingWampClient(testDb)
        # Runs all cases; yields once every testee has been exercised.
        runId, resultIds = yield testRunner.run(spec)
        print
        print "Tests finished: run ID %s, result IDs %d" % (runId, len(resultIds))
        print
        summary = yield testDb.getTestRunSummary(runId)
        tab = Tabify(['l32', 'r5', 'r5'])
        print
        print tab.tabify(['Testee', 'Pass', 'Fail'])
        print tab.tabify()
        #for t in sorted(summary.keys()):
        for t in summary:
            print tab.tabify([t['name'], t['passed'], t['failed']])
        print
        #for rid in resultIds:
        #   res = yield testDb.getResult(rid)
        #   print r.runId, r.id, r.passed, r.started, r.ended, r.ended - r.started
        #   #pprint(result)
        # All results reported; shut the process down.
        reactor.stop()
    elif self.mode == 'fuzzingwampserver':
        raise Exception("not implemented")
    else:
        # Guard against an unexpected mode value set elsewhere.
        raise Exception("logic error")
| |
# -*- coding: utf-8 -*-
"""The :program:`celery amqp` command.
.. program:: celery amqp
"""
from __future__ import absolute_import, print_function, unicode_literals
import cmd as _cmd
import pprint
import shlex
import sys
from functools import partial
from itertools import count
from kombu.utils.encoding import safe_str
from celery.bin.base import Command
from celery.five import string_t
from celery.utils.functional import padlist
from celery.utils.serialization import strtobool
__all__ = ('AMQPAdmin', 'AMQShell', 'Spec', 'amqp')

# Map to coerce strings to other types.
# bool needs special treatment: bool('false') is True, so route bools
# through strtobool instead of the type constructor itself.
COERCE = {bool: strtobool}

# Header printed above the command list by ``help``.
HELP_HEADER = """
Commands
--------
""".rstrip()

# Usage example appended after the command list by ``help``.
EXAMPLE_TEXT = """
Example:
-> queue.delete myqueue yes no
"""

# All diagnostic output goes to stderr by default.
say = partial(print, file=sys.stderr)
class Spec(object):
    """AMQP Command specification.

    Used to convert arguments to Python values and display various help
    and tool-tips.

    Arguments:
        args (Sequence): see :attr:`args`.
        returns (str): see :attr:`returns`.
    """

    #: List of arguments this command takes.
    #: Should contain ``(argument_name, argument_type)`` tuples.
    args = None

    #: Helpful human string representation of what this command returns.
    #: May be :const:`None`, to signify the return type is unknown.
    returns = None

    def __init__(self, *args, **kwargs):
        self.args = args
        self.returns = kwargs.get('returns')

    def coerce(self, index, value):
        """Coerce value for argument at index."""
        arg_info = self.args[index]
        arg_type = arg_info[1]
        # Might be a custom way to coerce the string value,
        # so look in the coercion map.
        return COERCE.get(arg_type, arg_type)(value)

    def str_args_to_python(self, arglist):
        """Process list of string arguments to values according to spec.

        Example:
            >>> spec = Spec(('queue', str), ('if_unused', bool))
            >>> spec.str_args_to_python(['pobox', 'true'])
            ('pobox', True)
        """
        return tuple(
            self.coerce(index, value) for index, value in enumerate(arglist))

    def format_response(self, response):
        """Format the return value of this command in a human-friendly way."""
        if not self.returns:
            return 'ok.' if response is None else response
        if callable(self.returns):
            return self.returns(response)
        return self.returns.format(response)

    def format_arg(self, name, type, default_value=None):
        # Renders ``name:default`` when a default exists; the ``type``
        # parameter is accepted for signature symmetry but not displayed.
        if default_value is not None:
            return '{0}:{1}'.format(name, default_value)
        return name

    def format_signature(self):
        # padlist pads each arg tuple to (name, type, default).
        return ' '.join(self.format_arg(*padlist(list(arg), 3))
                        for arg in self.args)
def dump_message(message):
    """Return a displayable representation of a ``basic.get`` result.

    ``None`` (queue was empty) becomes a hint string; otherwise a dict
    of the message's body, properties and delivery info is returned.
    """
    if message is None:
        return 'No messages in queue. basic.publish something.'
    shown = ('body', 'properties', 'delivery_info')
    return dict((name, getattr(message, name)) for name in shown)
def format_declare_queue(ret):
    """Format the ``(queue, message_count, consumer_count)`` triple
    returned by ``queue.declare`` into a status line."""
    template = 'ok. queue:{0} messages:{1} consumers:{2}.'
    return template.format(*ret)
class AMQShell(_cmd.Cmd):
    """AMQP API Shell.

    Arguments:
        connect (Callable): Function used to connect to the server.
            Must return :class:`kombu.Connection` object.
        silent (bool): If enabled, the commands won't have annoying
            output not relevant when running in non-shell mode.
    """

    conn = None
    chan = None
    prompt_fmt = '{self.counter}> '
    # NOTE(review): this also rebinds ``cmd.IDENTCHARS`` module-wide at
    # class-creation time so '.' counts as part of a command identifier.
    identchars = _cmd.IDENTCHARS = '.'
    needs_reconnect = False
    counter = 1
    inc_counter = count(2)

    #: Map of built-in command names -> method names
    builtins = {
        'EOF': 'do_exit',
        'exit': 'do_exit',
        'help': 'do_help',
    }

    #: Map of AMQP API commands and their :class:`Spec`.
    amqp = {
        'exchange.declare': Spec(('exchange', str),
                                 ('type', str),
                                 ('passive', bool, 'no'),
                                 ('durable', bool, 'no'),
                                 ('auto_delete', bool, 'no'),
                                 ('internal', bool, 'no')),
        'exchange.delete': Spec(('exchange', str),
                                ('if_unused', bool)),
        'queue.bind': Spec(('queue', str),
                           ('exchange', str),
                           ('routing_key', str)),
        'queue.declare': Spec(('queue', str),
                              ('passive', bool, 'no'),
                              ('durable', bool, 'no'),
                              ('exclusive', bool, 'no'),
                              ('auto_delete', bool, 'no'),
                              returns=format_declare_queue),
        'queue.delete': Spec(('queue', str),
                             ('if_unused', bool, 'no'),
                             ('if_empty', bool, 'no'),
                             returns='ok. {0} messages deleted.'),
        'queue.purge': Spec(('queue', str),
                            returns='ok. {0} messages deleted.'),
        'basic.get': Spec(('queue', str),
                          ('no_ack', bool, 'off'),
                          returns=dump_message),
        'basic.publish': Spec(('msg', str),
                              ('exchange', str),
                              ('routing_key', str),
                              ('mandatory', bool, 'no'),
                              ('immediate', bool, 'no')),
        'basic.ack': Spec(('delivery_tag', int)),
    }

    def _prepare_spec(self, conn):
        # XXX Hack to fix Issue #2013: when talking to py-amqp directly,
        # basic.publish must accept a Message instance, not a string.
        from amqp import Connection, Message
        if isinstance(conn.connection, Connection):
            self.amqp['basic.publish'] = Spec(('msg', Message),
                                              ('exchange', str),
                                              ('routing_key', str),
                                              ('mandatory', bool, 'no'),
                                              ('immediate', bool, 'no'))

    def __init__(self, *args, **kwargs):
        # ``connect`` is required; ``silent``/``out`` are optional and
        # control diagnostic output.
        self.connect = kwargs.pop('connect')
        self.silent = kwargs.pop('silent', False)
        self.out = kwargs.pop('out', sys.stderr)
        _cmd.Cmd.__init__(self, *args, **kwargs)
        self._reconnect()

    def note(self, m):
        """Say something to the user. Disabled if :attr:`silent`."""
        if not self.silent:
            say(m, file=self.out)

    def say(self, m):
        # Unconditional output (not silenced).
        say(m, file=self.out)

    def get_amqp_api_command(self, cmd, arglist):
        """Get AMQP command wrapper.

        With a command name and a list of arguments, convert the arguments
        to Python values and find the corresponding method on the AMQP channel
        object.

        Returns:
            Tuple: of `(method, processed_args)` pairs.
        """
        spec = self.amqp[cmd]
        args = spec.str_args_to_python(arglist)
        # 'queue.declare' -> channel.queue_declare
        attr_name = cmd.replace('.', '_')
        if self.needs_reconnect:
            self._reconnect()
        return getattr(self.chan, attr_name), args, spec.format_response

    def do_exit(self, *args):
        """The `'exit'` command."""
        self.note("\n-> please, don't leave!")
        sys.exit(0)

    def display_command_help(self, cmd, short=False):
        # ``short`` is accepted but currently unused.
        spec = self.amqp[cmd]
        self.say('{0} {1}'.format(cmd, spec.format_signature()))

    def do_help(self, *args):
        # With no arguments: list every AMQP command; otherwise show the
        # signature of the single requested command.
        if not args:
            self.say(HELP_HEADER)
            for cmd_name in self.amqp:
                self.display_command_help(cmd_name, short=True)
            self.say(EXAMPLE_TEXT)
        else:
            self.display_command_help(args[0])

    def default(self, line):
        self.say("unknown syntax: {0!r}. how about some 'help'?".format(line))

    def get_names(self):
        return set(self.builtins) | set(self.amqp)

    def completenames(self, text, *ignored):
        """Return all commands starting with `text`, for tab-completion."""
        names = self.get_names()
        first = [cmd for cmd in names
                 if cmd.startswith(text.replace('_', '.'))]
        if first:
            return first
        # Fall back to matching the part after the '.' (e.g. 'declare').
        return [cmd for cmd in names
                if cmd.partition('.')[2].startswith(text)]

    def dispatch(self, cmd, arglist):
        """Dispatch and execute the command.

        Look-up order is: :attr:`builtins` -> :attr:`amqp`.
        """
        if isinstance(arglist, string_t):
            arglist = shlex.split(safe_str(arglist))
        if cmd in self.builtins:
            return getattr(self, self.builtins[cmd])(*arglist)
        fun, args, formatter = self.get_amqp_api_command(cmd, arglist)
        return formatter(fun(*args))

    def parseline(self, parts):
        """Parse input line.

        Returns:
            Tuple: of three items:
                `(command_name, arglist, original_line)`
        """
        if parts:
            return parts[0], parts[1:], ' '.join(parts)
        return '', '', ''

    def onecmd(self, line):
        """Parse line and execute command."""
        if isinstance(line, string_t):
            line = shlex.split(safe_str(line))
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        self.lastcmd = line
        # Bump the prompt counter for the next command.
        self.counter = next(self.inc_counter)
        try:
            self.respond(self.dispatch(cmd, arg))
        except (AttributeError, KeyError) as exc:
            # Unknown command or channel method; ``exc`` is unused.
            self.default(line)
        except Exception as exc:  # pylint: disable=broad-except
            # Broker-side errors usually invalidate the channel, so
            # schedule a reconnect before the next command.
            self.say(exc)
            self.needs_reconnect = True

    def respond(self, retval):
        """What to do with the return value of a command."""
        if retval is not None:
            if isinstance(retval, string_t):
                self.say(retval)
            else:
                self.say(pprint.pformat(retval))

    def _reconnect(self):
        """Re-establish connection to the AMQP server."""
        self.conn = self.connect(self.conn)
        self._prepare_spec(self.conn)
        self.chan = self.conn.default_channel
        self.needs_reconnect = False

    @property
    def prompt(self):
        return self.prompt_fmt.format(self=self)
class AMQPAdmin(object):
    """The celery :program:`celery amqp` utility."""

    #: Shell class used for both one-shot and interactive mode.
    Shell = AMQShell

    def __init__(self, *args, **kwargs):
        # ``app`` is mandatory; any positional args form a single command
        # to execute non-interactively (see :meth:`run`).
        self.app = kwargs['app']
        self.out = kwargs.setdefault('out', sys.stderr)
        self.silent = kwargs.get('silent')
        self.args = args

    def connect(self, conn=None):
        """(Re-)establish a broker connection, closing any previous one."""
        if conn:
            conn.close()
        conn = self.app.connection()
        self.note('-> connecting to {0}.'.format(conn.as_uri()))
        conn.connect()
        self.note('-> connected.')
        return conn

    def run(self):
        """Execute a single command when args were given, otherwise enter
        the interactive shell loop until EOF/Ctrl-C."""
        shell = self.Shell(connect=self.connect, out=self.out)
        if self.args:
            return shell.onecmd(self.args)
        try:
            return shell.cmdloop()
        except KeyboardInterrupt:
            self.note('(bibi)')

    def note(self, m):
        # Status output; suppressed in silent mode.
        if not self.silent:
            say(m, file=self.out)
class amqp(Command):
    """AMQP Administration Shell.

    Also works for non-AMQP transports (but not ones that
    store declarations in memory).

    Examples:
        .. code-block:: console

            $ # start shell mode
            $ celery amqp
            $ # show list of commands
            $ celery amqp help

            $ celery amqp exchange.delete name
            $ celery amqp queue.delete queue
            $ celery amqp queue.delete queue yes yes
    """

    def run(self, *args, **options):
        # Forward everything to AMQPAdmin, injecting the current app.
        options['app'] = self.app
        return AMQPAdmin(*args, **options).run()
def main():
    """Command-line entry point for :program:`celery amqp`."""
    amqp().execute_from_commandline()


if __name__ == '__main__':  # pragma: no cover
    main()
| |
from __future__ import print_function
import inspect
from functools import wraps
from fito.operations.operation import Operation
from fito.specs.fields import UnboundField, PrimitiveField, BaseSpecField, KwargsField, SpecField
from fito.specs.base import get_import_path, Spec
try:
import cPickle
except ImportError:
import pickle as cPickle
class GenericDecorator(Spec):
    """
    Abstracts all the boilerplate required to build a decorator that works on functions, instance methods and class methods

    :param method_type: if is None, the decorated function is assumed to be a function, otherwise it is assumed
        to be a method. If method_type == 'instance' the function is assumed to be an instance method otherwise a
        classmethod
    """
    # None -> plain function; 'instance' -> instance method; 'class' -> classmethod.
    method_type = PrimitiveField(0, default=None)

    def __get__(self, instance, owner):
        # Descriptor protocol hook: fires when the decorated method is
        # looked up on an instance or on its class.
        if (instance is None and self.method_type == 'instance') or \
                (owner is None and self.method_type == 'class'):
            # Unbound access: hand back the decorator itself.
            return self

        # Value bound as the method's first argument (self or cls).
        first_arg = instance if self.method_type == 'instance' else owner

        @wraps(self.func)
        def new_f(*args, **kwargs):
            return self.func(first_arg, *args, **kwargs)

        cls = type(instance) if instance is not None else owner
        assert cls is not None
        # NOTE: inspect.getargspec is Python-2 era API (removed in 3.11).
        return self.create_decorated(self.func, new_f, inspect.getargspec(self.func), first_arg=first_arg)

    def __call__(self, func):
        # Methods must wait for __get__ to learn the bound target, so the
        # function is only stashed here; plain functions decorate at once.
        if self.method_type:
            self.func = func
            return self
        else:
            return self.create_decorated(func, func)

    def create_decorated(self, to_wrap, func_to_execute, f_spec=None, first_arg=None):
        """
        Abstract method that should be implemented in order to build a decorator

        The difference between `to_wrap` and `func_to_execute` is the fact that in the case of instance methods
        and class methods, `func_to_execute` has the first argument already binded.
        If `to_wrap` is just a function, then `to_wrap == func_to_execute`

        :param to_wrap: Original wrapped function
        :param func_to_execute: You should execute this function
        :param f_spec: The argspec of the function to be decorated, if None, it should be computed from to_wrap (TODO: remove this argument)
        :param first_arg: `self` if it's an instance method, `cls` if it's a classmethod, None otherwise
        """
        raise NotImplementedError()
class as_operation(GenericDecorator):
    """
    Creates an operation from a callable

    :param out_type: Base class of the operation to be built. Defaults to `Operation`
    :param out_name: Name of the class to be built, defaults to the decorated function name.
    """
    # Base class for the generated Operation subclass.
    out_type = PrimitiveField(default=Operation)
    # Name of the generated class; None -> use the wrapped function's name.
    out_name = PrimitiveField(default=None)
    # Optional data store onto which executions are cached.
    cache_on = SpecField(default=None)
    # Per-argument field specifications, keyed by argument name.
    args_specifications = KwargsField()

    def create_decorated(self, to_wrap, func_to_execute, f_spec=None, first_arg=None):
        # Build (or refresh) the Operation subclass wrapping the callable.
        f_spec = f_spec or inspect.getargspec(to_wrap)
        OperationClass = operation_from_func(
            to_wrap=to_wrap,
            func_to_execute=func_to_execute,
            out_type=self.out_type,
            out_name=self.out_name,
            args_specifications=self.args_specifications,
            f_spec=f_spec,
            method_type=self.method_type,
            first_arg=first_arg,
            cache_on=self.cache_on
        )
        return OperationClass

    @staticmethod
    def get_current_operation():
        """
        Should be called inside a function decorated with as_operation
        """
        # f_back brings you to the calling function, f_back brings you to the apply method of the
        # dynamically created operation
        frame = inspect.currentframe()
        try:
            res = frame.f_back.f_back.f_locals['self']
            if not isinstance(res, Operation):
                raise RuntimeError(
                    "This function should be called inside an operation created with the as_operation decorator"
                )
            return res
        finally:
            # Avoid reference cycle
            del frame
def get_default_values(f_spec):
    """Return ``{argument_name: default_value}`` for a function spec.

    :param f_spec: result of ``inspect.getargspec`` (anything exposing
        ``args`` and ``defaults`` attributes works).
    :return: a dict covering only the arguments that declare a default;
        empty when the function has none.
    """
    if f_spec.defaults is None:
        return {}
    # Defaults align with the *last* len(defaults) positional arguments.
    defaulted_args = f_spec.args[-len(f_spec.defaults):]
    return dict(zip(defaulted_args, f_spec.defaults))
def print_fields_from_func(callable):
    """Print a ``PrimitiveField`` declaration line for each argument of
    *callable* — a scaffolding helper for hand-writing an Operation class.

    NOTE(review): the parameter name shadows the ``callable`` builtin;
    kept as-is for backward compatibility.
    """
    f_spec = inspect.getargspec(callable)
    default_values = get_default_values(f_spec)
    for i, arg in enumerate(f_spec.args):
        if arg == 'self': continue
        # For bound methods, shift positions so the first real argument
        # (after self) gets pos 0.
        i -= f_spec.args[0] == 'self'
        default_str = ', default={}'.format(default_values[arg]) if arg in default_values else ''
        print('{} = PrimitiveField({}{})'.format(arg, i, default_str))
def operation_from_func(to_wrap, func_to_execute, out_type, out_name, args_specifications, f_spec=None,
                        method_type=None, first_arg=None, cache_on=None):
    """
    In the case of methods, to_wrap is not the same to func_to_execute

    :param to_wrap: See `GenericDecorator.create_decorated` for an explanation
    :param func_to_execute: See `GenericDecorator.create_decorated` for an explanation
    :param cache_on: A data store onto which the operation should be cached
    :return: a dynamically created (or refreshed) Operation subclass whose
        fields mirror the wrapped callable's arguments
    """
    f_spec = f_spec or inspect.getargspec(to_wrap)
    out_name = out_name or to_wrap.__name__

    # TODO: find the first_arg where the method was defined
    if method_type == 'instance' and not isinstance(first_arg, Spec):
        # Only when it's an instance of Spec we can identify
        out_name = '{}@{}'.format(out_name, id(first_arg))

    default_values = get_default_values(f_spec)

    # Build one field per argument; bound (regular) and unbound fields
    # keep independent position counters.
    attrs = {}
    binded_pos = 0
    unbinded_pos = 0
    for arg in f_spec.args:
        if method_type == 'instance' and arg == 'self': continue
        if method_type == 'class' and arg == 'cls': continue
        if arg in args_specifications:
            spec = args_specifications[arg]
            if inspect.isclass(spec) and issubclass(spec, Spec):
                spec = SpecField(base_type=spec)
            # It can be either a class, or the instance itself
            if inspect.isclass(spec) or inspect.isfunction(spec): spec = spec()
            if isinstance(spec, UnboundField):
                spec.pos = unbinded_pos
                unbinded_pos += 1
            else:
                spec.pos = binded_pos
                binded_pos += 1
        else:
            # No explicit specification: treat as a positional primitive.
            spec = PrimitiveField(binded_pos)
            binded_pos += 1
        if arg in default_values: spec.default = default_values[arg]
        attrs[arg] = spec

    def get_this_args(self, runner=None):
        # Collect field values; Operation-valued spec fields are executed
        # through the runner so apply() receives concrete values.
        this_args = {}
        for k, v in attrs.iteritems():  # NOTE: Python 2 dict API
            value = getattr(self, k)
            if isinstance(v, BaseSpecField) and runner is not None and isinstance(value, Operation):
                value = runner.execute(value)
            this_args[k] = value
        return this_args

    def to_dict(self, include_all=False):
        # Serialized 'type' points back to the importable callable (or the
        # bound method owner for methods).
        res = super(out_type, self).to_dict(include_all=include_all)
        if method_type is not None:
            res['type'] = get_import_path(first_arg, func_to_execute.__name__)
        else:
            res['type'] = get_import_path(func_to_execute)
        return res

    @property
    def self(self):
        if method_type is None:
            raise RuntimeError('Can only be called with an operation created from a method')
        return first_arg

    def apply(self, runner):
        this_args = self.get_this_args(runner)
        return func_to_execute(**this_args)

    # Assemble the class body: argument fields plus the methods above.
    cls_attrs = attrs.copy()
    cls_attrs['func'] = staticmethod(func_to_execute)
    cls_attrs['apply'] = apply
    cls_attrs['get_this_args'] = get_this_args
    cls_attrs['to_dict'] = to_dict
    cls_attrs['self'] = self

    cls = Operation.type2spec_class(out_name)
    if cls is None:
        # if the class does not exist, create it
        cls = type(out_name, (out_type,), cls_attrs)
    else:
        # otherwise update it
        for k, v in cls_attrs.iteritems():
            setattr(cls, k, v)

    if cache_on is not None:
        cls.default_data_store = cache_on
    else:
        cls.default_data_store = None

    cls.__module__ = to_wrap.__module__
    return cls
| |
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.configs.benchmark_config_spec."""
import os
import unittest
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import os_types
from perfkitbenchmarker import providers
from perfkitbenchmarker import static_virtual_machine
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.gcp import gce_virtual_machine
from tests import mock_flags
# Component/option names used when constructing specs and decoders below.
_COMPONENT = 'test_component'
_OPTION = 'test_option'

# Canned per-cloud VM/disk configurations shared across test cases.
_GCP_ONLY_VM_CONFIG = {'GCP': {'machine_type': 'n1-standard-1'}}
_GCP_AWS_VM_CONFIG = {'GCP': {'machine_type': 'n1-standard-1'},
                      'AWS': {'machine_type': 'm4.large'}}
_GCP_AWS_DISK_CONFIG = {'GCP': {}, 'AWS': {}}
class FlagsDecoderTestCase(unittest.TestCase):
    """Tests for benchmark_config_spec._FlagsDecoder."""

    def setUp(self):
        super(FlagsDecoderTestCase, self).setUp()
        self._decoder = benchmark_config_spec._FlagsDecoder(option=_OPTION)
        # A private FlagValues registry with a single integer flag.
        self._flag_values = flags.FlagValues()
        flags.DEFINE_integer('test_flag', 0, 'Test flag.',
                             flag_values=self._flag_values)

    def assertResultIsCorrect(self, result, expected_flag_value,
                              expected_flag_present):
        # The decoder must return a *copy* of the flag dict containing the
        # single registered flag with the expected value/present state.
        self.assertIsInstance(result, dict)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['test_flag'].value, expected_flag_value)
        self.assertEqual(result['test_flag'].present, expected_flag_present)
        self.assertIsNot(result, self._flag_values.FlagDict())

    def testNone(self):
        result = self._decoder.Decode(None, _COMPONENT, self._flag_values)
        self.assertResultIsCorrect(result, 0, False)

    def testEmptyDict(self):
        result = self._decoder.Decode({}, _COMPONENT, self._flag_values)
        self.assertResultIsCorrect(result, 0, False)

    def testValidFlagOverride(self):
        result = self._decoder.Decode({'test_flag': 1}, _COMPONENT,
                                      self._flag_values)
        self.assertResultIsCorrect(result, 1, True)

    def testPresentFlagNotOverridden(self):
        # A flag already provided on the command line must win over config.
        self._flag_values['test_flag'].present = True
        result = self._decoder.Decode({'test_flag': 1}, _COMPONENT,
                                      self._flag_values)
        self.assertResultIsCorrect(result, 0, True)

    def testInvalidValueType(self):
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._decoder.Decode(0, _COMPONENT, self._flag_values)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.test_option value: "0" (of type "int"). Value '
            'must be one of the following types: NoneType, dict.'))

    def testInvalidFlagName(self):
        with self.assertRaises(errors.Config.UnrecognizedOption) as cm:
            self._decoder.Decode({'flag': 1}, _COMPONENT, self._flag_values)
        self.assertEqual(str(cm.exception), (
            'Unrecognized option test_component.test_option.flag. Each option '
            'within test_component.test_option must correspond to a valid '
            'command-line flag.'))

    def testInvalidFlagValue(self):
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._decoder.Decode({'test_flag': 'two'}, _COMPONENT, self._flag_values)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.test_option.test_flag value: "two" (of type '
            '"str").{0}flag --test_flag=two: invalid literal for int() with base '
            "10: 'two'".format(os.linesep)))
class PerCloudConfigSpecTestCase(unittest.TestCase):
    """Tests for benchmark_config_spec._PerCloudConfigSpec."""

    def setUp(self):
        super(PerCloudConfigSpecTestCase, self).setUp()
        self._spec_class = benchmark_config_spec._PerCloudConfigSpec

    def testDefaults(self):
        # With no kwargs, every valid cloud attribute defaults to None.
        spec = self._spec_class(_COMPONENT)
        for cloud in providers.VALID_CLOUDS:
            self.assertIsNone(getattr(spec, cloud))

    def testDict(self):
        # Only the cloud given a dict is set; the rest stay None.
        spec = self._spec_class(_COMPONENT, GCP={})
        self.assertEqual(spec.GCP, {})
        for cloud in frozenset(providers.VALID_CLOUDS).difference([providers.GCP]):
            self.assertIsNone(getattr(spec, cloud))

    def testNonDict(self):
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, GCP=[])
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.GCP value: "[]" (of type "list"). Value must '
            'be one of the following types: dict.'))

    def testUnrecognizedCloud(self):
        with self.assertRaises(errors.Config.UnrecognizedOption) as cm:
            self._spec_class(_COMPONENT, fake_provider={})
        self.assertEqual(str(cm.exception), (
            'Unrecognized options were found in test_component: fake_provider.'))
class PerCloudConfigDecoderTestCase(unittest.TestCase):
    """Tests for benchmark_config_spec._PerCloudConfigDecoder."""

    def setUp(self):
        super(PerCloudConfigDecoderTestCase, self).setUp()
        self._decoder = benchmark_config_spec._PerCloudConfigDecoder(option=_OPTION)

    def testRejectNone(self):
        # By default None is rejected...
        with self.assertRaises(errors.Config.InvalidValue):
            self._decoder.Decode(None, _COMPONENT, {})

    def testAcceptNone(self):
        # ...unless the decoder is constructed with none_ok=True.
        decoder = benchmark_config_spec._PerCloudConfigDecoder(none_ok=True,
                                                               option=_OPTION)
        self.assertIsNone(decoder.Decode(None, _COMPONENT, {}))

    def testEmptyDict(self):
        result = self._decoder.Decode({}, _COMPONENT, {})
        self.assertIsInstance(result, benchmark_config_spec._PerCloudConfigSpec)
        self.assertEqual(result.__dict__, {
            cloud: None for cloud in providers.VALID_CLOUDS})

    def testNonEmptyDict(self):
        result = self._decoder.Decode(_GCP_ONLY_VM_CONFIG, _COMPONENT, {})
        self.assertIsInstance(result, benchmark_config_spec._PerCloudConfigSpec)
        expected_attributes = {cloud: None for cloud in providers.VALID_CLOUDS}
        expected_attributes['GCP'] = {'machine_type': 'n1-standard-1'}
        self.assertEqual(result.__dict__, expected_attributes)
class StaticVmDecoderTestCase(unittest.TestCase):
    """Tests for benchmark_config_spec._StaticVmDecoder."""

    def setUp(self):
        super(StaticVmDecoderTestCase, self).setUp()
        self._decoder = benchmark_config_spec._StaticVmDecoder()

    def testNone(self):
        with self.assertRaises(errors.Config.InvalidValue):
            self._decoder.Decode(None, _COMPONENT, {})

    def testValidInput(self):
        result = self._decoder.Decode({'ssh_port': 111}, _COMPONENT, {})
        self.assertIsInstance(result, static_virtual_machine.StaticVmSpec)
        self.assertEqual(result.ssh_port, 111)
class StaticVmListDecoderTestCase(unittest.TestCase):
    """Tests for benchmark_config_spec._StaticVmListDecoder."""

    def setUp(self):
        super(StaticVmListDecoderTestCase, self).setUp()
        self._decoder = benchmark_config_spec._StaticVmListDecoder()

    def testNone(self):
        with self.assertRaises(errors.Config.InvalidValue):
            self._decoder.Decode(None, _COMPONENT, {})

    def testValidList(self):
        # NOTE: xrange and list-valued range are Python 2 semantics.
        input_list = [{'ssh_port': i} for i in xrange(3)]
        result = self._decoder.Decode(input_list, _COMPONENT, {})
        self.assertIsInstance(result, list)
        self.assertEqual([vm_spec.ssh_port for vm_spec in result], range(3))

    def testInvalidList(self):
        # The 'ssh_pory' typo in entry 2 must be reported with its index.
        input_list = [{'ssh_port': 0}, {'ssh_port': 1}, {'ssh_pory': 2}]
        with self.assertRaises(errors.Config.UnrecognizedOption) as cm:
            self._decoder.Decode(input_list, _COMPONENT, {})
        self.assertEqual(str(cm.exception), (
            'Unrecognized options were found in test_component[2]: ssh_pory.'))
class VmGroupSpecTestCase(unittest.TestCase):
    """Tests for benchmark_config_spec._VmGroupSpec."""

    def setUp(self):
        super(VmGroupSpecTestCase, self).setUp()
        self._spec_class = benchmark_config_spec._VmGroupSpec
        # Minimal kwargs satisfying the spec's required options.
        self._kwargs = {'cloud': providers.GCP, 'os_type': os_types.DEBIAN,
                        'vm_spec': _GCP_AWS_VM_CONFIG}

    def testMissingValues(self):
        with self.assertRaises(errors.Config.MissingOption) as cm:
            self._spec_class(_COMPONENT)
        self.assertEqual(str(cm.exception), (
            'Required options were missing from test_component: cloud, os_type, '
            'vm_spec.'))

    def testDefaults(self):
        result = self._spec_class(_COMPONENT, **self._kwargs)
        self.assertIsInstance(result, benchmark_config_spec._VmGroupSpec)
        self.assertEqual(result.cloud, 'GCP')
        self.assertEqual(result.disk_count, 1)
        self.assertIsNone(result.disk_spec)
        self.assertEqual(result.os_type, 'debian')
        self.assertEqual(result.static_vms, [])
        self.assertEqual(result.vm_count, 1)
        self.assertIsInstance(result.vm_spec, gce_virtual_machine.GceVmSpec)

    def testInvalidCloud(self):
        self._kwargs['cloud'] = 'fake_provider'
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.cloud value: "fake_provider". Value must be '
            'one of the following: {0}.'.format(', '.join(providers.VALID_CLOUDS))))

    def testInvalidDiskCount(self):
        # Wrong type first, then an out-of-range value.
        self._kwargs['disk_count'] = None
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.disk_count value: "None" (of type "NoneType"). '
            'Value must be one of the following types: int.'))
        self._kwargs['disk_count'] = -1
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.disk_count value: "-1". '
            'Value must be at least 0.'))

    def testInvalidDiskSpec(self):
        self._kwargs['disk_spec'] = {'GCP': None}
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.disk_spec.GCP value: "None" (of type '
            '"NoneType"). Value must be one of the following types: dict.'))

    def testInvalidOsType(self):
        self._kwargs['os_type'] = 'fake_os_type'
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.os_type value: "fake_os_type". Value must be '
            'one of the following: {0}.'.format(', '.join(os_types.ALL))))

    def testInvalidStaticVms(self):
        self._kwargs['static_vms'] = [{'fake_option': None}]
        with self.assertRaises(errors.Config.UnrecognizedOption) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Unrecognized options were found in test_component.static_vms[0]: '
            'fake_option.'))

    def testInvalidVmCount(self):
        self._kwargs['vm_count'] = None
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.vm_count value: "None" (of type "NoneType"). '
            'Value must be one of the following types: int.'))
        self._kwargs['vm_count'] = -1
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.vm_count value: "-1". '
            'Value must be at least 0.'))

    def testInvalidVmSpec(self):
        self._kwargs['vm_spec'] = {'GCP': None}
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.vm_spec.GCP value: "None" (of type '
            '"NoneType"). Value must be one of the following types: dict.'))

    def testValidInput(self):
        result = self._spec_class(
            _COMPONENT, cloud=providers.AWS, disk_count=0,
            disk_spec=_GCP_AWS_DISK_CONFIG, os_type=os_types.WINDOWS,
            static_vms=[{}], vm_count=0, vm_spec=_GCP_AWS_VM_CONFIG)
        self.assertIsInstance(result, benchmark_config_spec._VmGroupSpec)
        self.assertEqual(result.cloud, 'AWS')
        self.assertEqual(result.disk_count, 0)
        self.assertIsInstance(result.disk_spec, aws_disk.AwsDiskSpec)
        self.assertEqual(result.os_type, 'windows')
        self.assertIsInstance(result.static_vms, list)
        self.assertEqual(len(result.static_vms), 1)
        self.assertIsInstance(result.static_vms[0],
                              static_virtual_machine.StaticVmSpec)
        self.assertEqual(result.vm_count, 0)
        self.assertIsInstance(result.vm_spec, virtual_machine.BaseVmSpec)

    def testMissingCloudDiskConfig(self):
        with self.assertRaises(errors.Config.MissingOption) as cm:
            self._spec_class(_COMPONENT, cloud=providers.GCP, os_type=os_types.DEBIAN,
                             disk_spec={}, vm_spec=_GCP_AWS_VM_CONFIG)
        self.assertEqual(str(cm.exception), (
            'test_component.cloud is "GCP", but test_component.disk_spec does not '
            'contain a configuration for "GCP".'))

    def testMissingCloudVmConfig(self):
        with self.assertRaises(errors.Config.MissingOption) as cm:
            self._spec_class(_COMPONENT, cloud=providers.GCP, os_type=os_types.DEBIAN,
                             vm_spec={})
        self.assertEqual(str(cm.exception), (
            'test_component.cloud is "GCP", but test_component.vm_spec does not '
            'contain a configuration for "GCP".'))

    def createNonPresentFlags(self):
        # Mock flags with values set but present=False (not on command line).
        result = mock_flags.MockFlags()
        result['cloud'].value = providers.AWS
        result['num_vms'].value = 3
        result['os_type'].value = os_types.WINDOWS
        return result

    def createPresentFlags(self):
        # Same values, but marked as explicitly provided.
        result = self.createNonPresentFlags()
        result['cloud'].present = True
        result['num_vms'].present = True
        result['os_type'].present = True
        return result

    def testPresentFlagsAndPresentConfigValues(self):
        # Present flags override config values except vm_count (config wins).
        result = self._spec_class(_COMPONENT, flag_values=self.createPresentFlags(),
                                  vm_count=2, **self._kwargs)
        self.assertEqual(result.cloud, 'AWS')
        self.assertEqual(result.os_type, 'windows')
        self.assertEqual(result.vm_count, 2)

    def testPresentFlagsAndNonPresentConfigValues(self):
        result = self._spec_class(_COMPONENT, flag_values=self.createPresentFlags(),
                                  vm_spec=_GCP_AWS_VM_CONFIG)
        self.assertEqual(result.cloud, 'AWS')
        self.assertEqual(result.os_type, 'windows')
        self.assertEqual(result.vm_count, 1)

    def testNonPresentFlagsAndPresentConfigValues(self):
        result = self._spec_class(
            _COMPONENT, flag_values=self.createNonPresentFlags(), vm_count=2,
            **self._kwargs)
        self.assertEqual(result.cloud, 'GCP')
        self.assertEqual(result.os_type, 'debian')
        self.assertEqual(result.vm_count, 2)

    def testVmCountNone(self):
        # vm_count=None means "use the num_vms flag value".
        result = self._spec_class(
            _COMPONENT, flag_values=self.createNonPresentFlags(), vm_count=None,
            **self._kwargs)
        self.assertEqual(result.vm_count, 3)

    def testCallsLoadProviderAndChecksRequirements(self):
        flag_values = self.createNonPresentFlags()
        flag_values.ignore_package_requirements = False
        with mock.patch(providers.__name__ + '.LoadProvider'):
            self._spec_class(_COMPONENT, flag_values, **self._kwargs)
            providers.LoadProvider.assert_called_once_with('GCP', False)

    def testCallsLoadProviderAndIgnoresRequirements(self):
        flag_values = self.createNonPresentFlags()
        flag_values.ignore_package_requirements = True
        with mock.patch(providers.__name__ + '.LoadProvider'):
            self._spec_class(_COMPONENT, flag_values, **self._kwargs)
            providers.LoadProvider.assert_called_once_with('GCP', True)
class VmGroupsDecoderTestCase(unittest.TestCase):
    """Tests for benchmark_config_spec._VmGroupsDecoder."""

    def setUp(self):
        super(VmGroupsDecoderTestCase, self).setUp()
        self._decoder = benchmark_config_spec._VmGroupsDecoder()

    def testNone(self):
        with self.assertRaises(errors.Config.InvalidValue):
            self._decoder.Decode(None, _COMPONENT, {})

    def testValidInput(self):
        result = self._decoder.Decode({
            'default': {'cloud': providers.GCP, 'os_type': os_types.DEBIAN,
                        'vm_spec': _GCP_AWS_VM_CONFIG}}, _COMPONENT, {})
        self.assertIsInstance(result, dict)
        self.assertEqual(len(result), 1)
        self.assertIsInstance(result['default'], benchmark_config_spec._VmGroupSpec)
        self.assertEqual(result['default'].cloud, 'GCP')
        self.assertEqual(result['default'].os_type, 'debian')
        self.assertIsInstance(result['default'].vm_spec,
                              gce_virtual_machine.GceVmSpec)

    def testInvalidInput(self):
        # The bad option in the second static VM must be reported with the
        # full path including the group name and list index.
        with self.assertRaises(errors.Config.UnrecognizedOption) as cm:
            self._decoder.Decode(
                {'default': {'cloud': providers.GCP, 'os_type': os_types.DEBIAN,
                             'static_vms': [{}, {'fake_option': 1.2}],
                             'vm_spec': _GCP_AWS_VM_CONFIG}},
                _COMPONENT, {})
        self.assertEqual(str(cm.exception), (
            'Unrecognized options were found in '
            'test_component.default.static_vms[1]: fake_option.'))
class BenchmarkConfigSpecTestCase(unittest.TestCase):
    """Tests decoding/validation behavior of BenchmarkConfigSpec."""

    def setUp(self):
        super(BenchmarkConfigSpecTestCase, self).setUp()
        self._spec_class = benchmark_config_spec.BenchmarkConfigSpec
        self._description = 'Test description.'
        self._vm_groups = {'default': {'cloud': providers.GCP,
                                       'os_type': os_types.DEBIAN,
                                       'vm_spec': _GCP_AWS_VM_CONFIG}}
        self._kwargs = {'description': self._description,
                        'vm_groups': self._vm_groups}

    def testValidInput(self):
        """A minimal valid config decodes and snapshots the global flags."""
        result = self._spec_class(_COMPONENT, flag_values=flags.FLAGS,
                                  **self._kwargs)
        self.assertIsInstance(result, benchmark_config_spec.BenchmarkConfigSpec)
        self.assertEqual(result.description, 'Test description.')
        # The spec holds its own copy of the flag dict, not the live one.
        self.assertIsInstance(result.flags, dict)
        self.assertEqual(sorted(result.flags.keys()),
                         sorted(flags.FLAGS.FlagDict().keys()))
        self.assertIsNot(result.flags, flags.FLAGS.FlagDict())
        self.assertIsInstance(result.vm_groups, dict)
        self.assertEqual(len(result.vm_groups), 1)
        self.assertIsInstance(result.vm_groups['default'],
                              benchmark_config_spec._VmGroupSpec)
        self.assertEqual(result.vm_groups['default'].cloud, 'GCP')
        self.assertEqual(result.vm_groups['default'].os_type, 'debian')
        self.assertIsInstance(result.vm_groups['default'].vm_spec,
                              gce_virtual_machine.GceVmSpec)

    def testInvalidVmGroups(self):
        """A bad nested option surfaces with its full config path."""
        self._kwargs['vm_groups']['default']['static_vms'] = [{'disk_specs': [{
            'disk_size': 0.5}]}]
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, flag_values=flags.FLAGS, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            'Invalid test_component.vm_groups.default.static_vms[0].disk_specs[0]'
            '.disk_size value: "0.5" (of type "float"). Value must be one of the '
            'following types: NoneType, int.'))

    def testMismatchedOsTypes(self):
        """Groups whose OS type is outside expected_os_types are all reported."""
        self._kwargs['vm_groups'] = {
            os_type + '_group': {'os_type': os_type, 'vm_spec': _GCP_AWS_VM_CONFIG}
            for os_type in (os_types.DEBIAN, os_types.RHEL, os_types.WINDOWS)}
        expected_os_types = os_types.JUJU, os_types.WINDOWS
        with self.assertRaises(errors.Config.InvalidValue) as cm:
            self._spec_class(_COMPONENT, expected_os_types=expected_os_types,
                             flag_values=flags.FLAGS, **self._kwargs)
        self.assertEqual(str(cm.exception), (
            "VM groups in test_component may only have the following OS types: "
            "'juju', 'windows'. The following VM group options are invalid:{sep}"
            "test_component.vm_groups['debian_group'].os_type: 'debian'{sep}"
            "test_component.vm_groups['rhel_group'].os_type: 'rhel'".format(
                sep=os.linesep)))

    def testFlagOverridesPropagate(self):
        """Config-level flag overrides reach the spec's flag copy and the VM
        groups, without mutating the global FLAGS object."""
        self._kwargs['flags'] = {'cloud': providers.AWS,
                                 'ignore_package_requirements': True}
        result = self._spec_class(_COMPONENT, flag_values=flags.FLAGS,
                                  **self._kwargs)
        self.assertIsInstance(result, benchmark_config_spec.BenchmarkConfigSpec)
        self.assertEqual(result.description, 'Test description.')
        self.assertIsInstance(result.flags, dict)
        self.assertEqual(sorted(result.flags.keys()),
                         sorted(flags.FLAGS.FlagDict().keys()))
        self.assertIsNot(result.flags, flags.FLAGS.FlagDict())
        # The override lands in the copy; the global flag is untouched.
        self.assertEqual(result.flags['cloud'].value, 'AWS')
        self.assertEqual(flags.FLAGS['cloud'].value, 'GCP')
        self.assertIsInstance(result.vm_groups, dict)
        self.assertEqual(len(result.vm_groups), 1)
        self.assertIsInstance(result.vm_groups['default'],
                              benchmark_config_spec._VmGroupSpec)
        self.assertEqual(result.vm_groups['default'].cloud, 'AWS')
        self.assertEqual(result.vm_groups['default'].os_type, 'debian')
        self.assertIsInstance(result.vm_groups['default'].vm_spec,
                              virtual_machine.BaseVmSpec)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
from __future__ import unicode_literals
import ctypes
import json
import random
import unittest
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.six.moves import range
from ..test_data import TestDataMixin
if HAS_GEOS:
from django.contrib.gis.geos import (
GEOSException, GEOSIndexError, GEOSGeometry, GeometryCollection, Point,
MultiPoint, Polygon, MultiPolygon, LinearRing, LineString,
MultiLineString, fromfile, fromstr, geos_version_info,
)
from django.contrib.gis.geos.base import gdal, GEOSBase
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):
    def test_base(self):
        "Tests out the GEOSBase class."
        # Testing out GEOSBase class, which provides a `ptr` property
        # that abstracts out access to underlying C pointers.
        class FakeGeom1(GEOSBase):
            pass

        # This one only accepts pointers to floats
        c_float_p = ctypes.POINTER(ctypes.c_float)

        class FakeGeom2(GEOSBase):
            ptr_type = c_float_p

        # Default ptr_type is `c_void_p`.
        fg1 = FakeGeom1()
        # Default ptr_type is C float pointer
        fg2 = FakeGeom2()

        # These assignments are OK -- None is allowed because
        # it's equivalent to the NULL pointer.
        fg1.ptr = ctypes.c_void_p()
        fg1.ptr = None
        fg2.ptr = c_float_p(ctypes.c_float(5.23))
        fg2.ptr = None

        # Because pointers have been set to NULL, an exception should be
        # raised when we try to access it. Raising an exception is
        # preferable to a segmentation fault that commonly occurs when
        # a C method is given a NULL memory reference.
        for fg in (fg1, fg2):
            # Equivalent to `fg.ptr`
            self.assertRaises(GEOSException, fg._get_ptr)

        # Anything that is either not None or the acceptable pointer type will
        # result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) will not be allowed.
        bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
        for bad_ptr in bad_ptrs:
            # Equivalent to `fg.ptr = bad_ptr`
            self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
            self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
    def test_wkt(self):
        "Testing WKT output."
        for g in self.geometries.wkt_out:
            geom = fromstr(g.wkt)
            # Only 3D geometries on GEOS >= 3.3.0 are round-tripped here;
            # 2D fixtures are parsed but not asserted against.
            # NOTE(review): this lexicographic version comparison would
            # misorder e.g. '3.10.0' < '3.3.0' -- confirm against the
            # format returned by geos_version_info().
            if geom.hasz and geos_version_info()['version'] >= '3.3.0':
                self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
    def test_hexewkb(self):
        "Testing (HEX)EWKB output."
        # Expected values obtained from PostGIS (see inline SQL below).
        # For testing HEX(EWKB).
        ogc_hex = b'01010000000000000000000000000000000000F03F'
        ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
        hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
        hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'

        pnt_2d = Point(0, 1, srid=4326)
        pnt_3d = Point(0, 1, 2, srid=4326)

        # OGC-compliant HEX will not have SRID value.
        self.assertEqual(ogc_hex, pnt_2d.hex)
        self.assertEqual(ogc_hex_3d, pnt_3d.hex)

        # HEXEWKB should be appropriate for its dimension -- have to use an
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into 3D coordinate if there is none.
        self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
        self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
        self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)

        # Same for EWKB.
        self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
        self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)

        # Redundant sanity check.
        self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
    def test_errors(self):
        "Testing the Error handlers."
        # string-based: malformed WKT fixtures must raise on parse.
        for err in self.geometries.errors:
            with self.assertRaises((GEOSException, ValueError)):
                fromstr(err.wkt)

        # Bad WKB
        self.assertRaises(GEOSException, GEOSGeometry, six.memoryview(b'0'))

        class NotAGeometry(object):
            pass

        # Some other object
        self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
        # None
        self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = six.memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_json(self):
        "Testing GeoJSON input/output (via GDAL)."
        for g in self.geometries.json_geoms:
            geom = GEOSGeometry(g.wkt)
            if not hasattr(g, 'not_equal'):
                # Loading jsons to prevent decimal differences
                self.assertEqual(json.loads(g.json), json.loads(geom.json))
                self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
            # GeoJSON input must reconstruct an equal geometry.
            self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raise on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertNotEqual(g, False)
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
    def test_linestring(self):
        "Testing LineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.linestrings:
            ls = fromstr(l.wkt)
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)
            self.assertEqual(ls.ring, False)
            # Centroid/tuple checks only where the fixture provides them.
            if hasattr(l, 'centroid'):
                self.assertEqual(l.centroid, ls.centroid.tuple)
            if hasattr(l, 'tup'):
                self.assertEqual(l.tup, ls.tuple)
            self.assertEqual(True, ls == fromstr(l.wkt))
            self.assertEqual(False, ls == prev)
            self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
            prev = ls

            # Creating a LineString from a tuple, list, and numpy array
            self.assertEqual(ls, LineString(ls.tuple))  # tuple
            self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
            self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
            # Point individual arguments
            self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
            if numpy:
                self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array
    def test_multilinestring(self):
        "Testing MultiLineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.multilinestrings:
            ml = fromstr(l.wkt)
            self.assertEqual(ml.geom_type, 'MultiLineString')
            self.assertEqual(ml.geom_typeid, 5)

            self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
            self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)

            self.assertEqual(True, ml == fromstr(l.wkt))
            self.assertEqual(False, ml == prev)
            prev = ml

            # Every member of the collection is a non-empty LineString.
            for ls in ml:
                self.assertEqual(ls.geom_type, 'LineString')
                self.assertEqual(ls.geom_typeid, 1)
                self.assertEqual(ls.empty, False)

            self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
            # Reconstruction from clones and from raw coordinate tuples.
            self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
            self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
    def test_polygons(self):
        "Testing Polygon objects."
        prev = fromstr('POINT(0 0)')
        for p in self.geometries.polygons:
            # Creating the Polygon, testing its properties.
            poly = fromstr(p.wkt)
            self.assertEqual(poly.geom_type, 'Polygon')
            self.assertEqual(poly.geom_typeid, 3)
            self.assertEqual(poly.empty, False)
            self.assertEqual(poly.ring, False)
            self.assertEqual(p.n_i, poly.num_interior_rings)
            self.assertEqual(p.n_i + 1, len(poly))  # Testing __len__
            self.assertEqual(p.n_p, poly.num_points)

            # Area & Centroid
            self.assertAlmostEqual(p.area, poly.area, 9)
            self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
            self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)

            # Testing the geometry equivalence
            self.assertEqual(True, poly == fromstr(p.wkt))
            self.assertEqual(False, poly == prev)  # Should not be equal to previous geometry
            self.assertEqual(True, poly != prev)

            # Testing the exterior ring
            ring = poly.exterior_ring
            self.assertEqual(ring.geom_type, 'LinearRing')
            self.assertEqual(ring.geom_typeid, 2)
            if p.ext_ring_cs:
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)  # Testing __getitem__

            # Testing __getitem__ and __setitem__ on invalid indices
            self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
            self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
            self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)

            # Testing __iter__
            for r in poly:
                self.assertEqual(r.geom_type, 'LinearRing')
                self.assertEqual(r.geom_typeid, 2)

            # Testing polygon construction.
            self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
            self.assertRaises(TypeError, Polygon, 'foo')

            # Polygon(shell, (hole1, ... holeN))
            rings = tuple(r for r in poly)
            self.assertEqual(poly, Polygon(rings[0], rings[1:]))

            # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
            ring_tuples = tuple(r.tuple for r in poly)
            self.assertEqual(poly, Polygon(*ring_tuples))

            # Constructing with tuples of LinearRings.
            self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
            self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertGreater(p1, p2)
self.assertLess(p2, p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertGreater(p4, p3)
self.assertLess(p3, p4)
    def test_multipolygons(self):
        "Testing MultiPolygon objects."
        # Parsed but unused; presumably exercises parser state between
        # fixtures -- TODO confirm it is still needed.
        fromstr('POINT (0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = fromstr(mp.wkt)
            self.assertEqual(mpoly.geom_type, 'MultiPolygon')
            self.assertEqual(mpoly.geom_typeid, 6)
            self.assertEqual(mp.valid, mpoly.valid)

            if mp.valid:
                self.assertEqual(mp.num_geom, mpoly.num_geom)
                self.assertEqual(mp.n_p, mpoly.num_coords)
                self.assertEqual(mp.num_geom, len(mpoly))
                self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
                for p in mpoly:
                    self.assertEqual(p.geom_type, 'Polygon')
                    self.assertEqual(p.geom_typeid, 3)
                    self.assertEqual(p.valid, True)
                # Reconstruction from cloned members round-trips the WKT.
                self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
    def test_memory_hijinks(self):
        "Testing Geometry __del__() on rings and polygons."
        # #### Memory issues with rings and poly

        # These tests are needed to ensure sanity with writable geometries.

        # Getting a polygon with interior rings, and pulling out the interior rings
        poly = fromstr(self.geometries.polygons[1].wkt)
        ring1 = poly[0]
        ring2 = poly[1]

        # These deletes should be 'harmless' since they are done on child geometries
        del ring1
        del ring2
        ring1 = poly[0]
        ring2 = poly[1]

        # Deleting the polygon
        del poly

        # Access to these rings is OK since they are clones.
        # (If they were not clones, this would segfault on freed memory.)
        str(ring1)
        str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in range(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
    def test_buffer(self):
        "Testing buffer()."
        for bg in self.geometries.buffer_geoms:
            g = fromstr(bg.wkt)

            # The buffer we expect
            exp_buf = fromstr(bg.buffer_wkt)
            quadsegs = bg.quadsegs
            width = bg.width

            # Can't use a floating-point for the number of quadsegs.
            self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))

            # Constructing our buffer
            buf = g.buffer(width, quadsegs)
            self.assertEqual(exp_buf.num_coords, buf.num_coords)
            self.assertEqual(len(exp_buf), len(buf))

            # Now assuring that each point in the buffer is almost equal
            for j in range(len(exp_buf)):
                exp_ring = exp_buf[j]
                buf_ring = buf[j]
                self.assertEqual(len(exp_ring), len(buf_ring))
                for k in range(len(exp_ring)):
                    # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                    self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                    self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
    def test_srid(self):
        "Testing the SRID property and keyword."
        # Testing SRID keyword on Point
        pnt = Point(5, 23, srid=4326)
        self.assertEqual(4326, pnt.srid)
        pnt.srid = 3084
        self.assertEqual(3084, pnt.srid)
        # A non-integer SRID is rejected at the ctypes boundary.
        self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')

        # Testing SRID keyword on fromstr(), and on Polygon rings.
        poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
        self.assertEqual(4269, poly.srid)
        for ring in poly:
            self.assertEqual(4269, ring.srid)
        poly.srid = 4326
        self.assertEqual(4326, poly.shell.srid)

        # Testing SRID keyword on GeometryCollection
        gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
        self.assertEqual(32021, gc.srid)
        for i in range(len(gc)):
            self.assertEqual(32021, gc[i].srid)

        # GEOS may get the SRID from HEXEWKB
        # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
        # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
        hex = '0101000020E610000000000000000014400000000000003740'
        p1 = fromstr(hex)
        self.assertEqual(4326, p1.srid)

        # Plain (OGC) hex carries no SRID.
        p2 = fromstr(p1.hex)
        self.assertIsNone(p2.srid)
        p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
        self.assertEqual(-1, p3.srid)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_custom_srid(self):
        """ Test with a srid unknown from GDAL """
        pnt = Point(111200, 220900, srid=999999)
        self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0"))
        self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
        # GDAL cannot resolve the custom SRID to a SpatialReference.
        self.assertIsNone(pnt.srs)

        # Test conversion from custom to a known srid
        c2w = gdal.CoordTransform(
            gdal.SpatialReference(
                '+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
                '+datum=WGS84 +units=m +no_defs'
            ),
            gdal.SpatialReference(4326))
        new_pnt = pnt.transform(c2w, clone=True)
        self.assertEqual(new_pnt.srid, 4326)
        self.assertAlmostEqual(new_pnt.x, 1, 3)
        self.assertAlmostEqual(new_pnt.y, 2, 3)
    def test_mutable_geometries(self):
        "Testing the mutability of Polygons and Geometry Collections."
        # ### Testing the mutability of Polygons ###
        for p in self.geometries.polygons:
            poly = fromstr(p.wkt)

            # Should only be able to use __setitem__ with LinearRing geometries.
            self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))

            # Constructing the new shell by adding 500 to every point in the old shell.
            shell_tup = poly.shell.tuple
            new_coords = []
            for point in shell_tup:
                new_coords.append((point[0] + 500., point[1] + 500.))
            new_shell = LinearRing(*tuple(new_coords))

            # Assigning polygon's exterior ring w/the new shell
            poly.exterior_ring = new_shell
            str(new_shell)  # new shell is still accessible
            self.assertEqual(poly.exterior_ring, new_shell)
            self.assertEqual(poly[0], new_shell)

        # ### Testing the mutability of Geometry Collections
        for tg in self.geometries.multipoints:
            mp = fromstr(tg.wkt)
            for i in range(len(mp)):
                # Creating a random point.
                pnt = mp[i]
                new = Point(random.randint(21, 100), random.randint(21, 100))
                # Testing the assignment
                mp[i] = new
                str(new)  # what was used for the assignment is still accessible
                self.assertEqual(mp[i], new)
                self.assertEqual(mp[i].wkt, new.wkt)
                self.assertNotEqual(pnt, mp[i])

        # MultiPolygons involve much more memory management because each
        # Polygon w/in the collection has its own rings.
        for tg in self.geometries.multipolygons:
            mpoly = fromstr(tg.wkt)
            for i in range(len(mpoly)):
                poly = mpoly[i]
                old_poly = mpoly[i]
                # Offsetting the each ring in the polygon by 500.
                for j in range(len(poly)):
                    r = poly[j]
                    for k in range(len(r)):
                        r[k] = (r[k][0] + 500., r[k][1] + 500.)
                    poly[j] = r

                self.assertNotEqual(mpoly[i], poly)
                # Testing the assignment
                mpoly[i] = poly
                str(poly)  # Still accessible
                self.assertEqual(mpoly[i], poly)
                self.assertNotEqual(mpoly[i], old_poly)

        # Extreme (!!) __setitem__ -- no longer works, have to detect
        # in the first object that __setitem__ is called in the subsequent
        # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
        # mpoly[0][0][0] = (3.14, 2.71)
        # self.assertEqual((3.14, 2.71), mpoly[0][0][0])
        # Doing it more slowly..
        # self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
        # del mpoly
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2., 3., 8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1., 2.))
pnt.coords = (1., 2., 3.)
self.assertEqual((1., 2., 3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.))
ls[0] = (1., 2., 3.)
self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
    def test_emptyCollections(self):
        "Testing empty geometries and collections."
        gc1 = GeometryCollection([])
        gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
        pnt = fromstr('POINT EMPTY')
        ls = fromstr('LINESTRING EMPTY')
        poly = fromstr('POLYGON EMPTY')
        mls = fromstr('MULTILINESTRING EMPTY')
        mpoly1 = fromstr('MULTIPOLYGON EMPTY')
        mpoly2 = MultiPolygon(())

        for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
            self.assertEqual(True, g.empty)

            # Testing len() and num_geom.
            if isinstance(g, Polygon):
                self.assertEqual(1, len(g))  # Has one empty linear ring
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g[0]))
            elif isinstance(g, (Point, LineString)):
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g))
            else:
                self.assertEqual(0, g.num_geom)
                self.assertEqual(0, len(g))

            # Testing __getitem__ (doesn't work on Point or Polygon)
            if isinstance(g, Point):
                self.assertRaises(GEOSIndexError, g.get_x)
            elif isinstance(g, Polygon):
                lr = g.shell
                self.assertEqual('LINEARRING EMPTY', lr.wkt)
                self.assertEqual(0, len(lr))
                self.assertEqual(True, lr.empty)
                self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
            else:
                self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_gdal(self):
        "Testing `ogr` and `srs` properties."
        g1 = fromstr('POINT(5 23)')
        self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
        # No SRID was given, so no SpatialReference is available.
        self.assertIsNone(g1.srs)

        g1_3d = fromstr('POINT(5 23 8)')
        self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
        # The z coordinate survives the OGR conversion.
        self.assertEqual(g1_3d.ogr.z, 8)

        g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
        self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
        self.assertIsInstance(g2.srs, gdal.SpatialReference)
        self.assertEqual(g2.hex, g2.ogr.hex)
        self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform(self):
        "Testing `transform` method."
        orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)

        # Using a srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(gdal.SpatialReference('EPSG:2774'))
        ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
        t3.transform(ct)

        # Testing use of the `clone` keyword.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)  # clone=True leaves the source untouched
        self.assertNotEqual(k1, k2)

        # All transformation routes must agree to ~3 decimal places.
        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)
    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform_3d(self):
        # Reprojection affects X/Y only; the Z value must pass through
        # the transform unchanged.
        p3d = GEOSGeometry('POINT (5 23 100)', 4326)
        p3d.transform(2774)
        self.assertEqual(p3d.z, 100)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_transform_noop(self):
        """ Testing `transform` method (SRID match) """
        # transform() should no-op if source & dest SRIDs match,
        # regardless of whether GDAL is available.
        if gdal.HAS_GDAL:
            # With GDAL available: in-place transform leaves coordinates
            # and SRID untouched...
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)
            # ...and clone=True still returns a distinct (equal) object.
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertIsNot(g1, g, "Clone didn't happen")
        # Simulate GDAL being unavailable by flipping the module flag;
        # restored in the finally block so other tests are unaffected.
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertIsNot(g1, g, "Clone didn't happen")
        finally:
            gdal.HAS_GDAL = old_has_gdal
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_transform_nogdal(self):
        """ Testing `transform` method (GDAL not available) """
        # With the GDAL flag forced off, a real reprojection (different
        # SRID) must fail; the flag is restored in the finally block.
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774)
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774, clone=True)
        finally:
            gdal.HAS_GDAL = old_has_gdal
    def test_extent(self):
        "Testing `extent` method."
        # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
        mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
        self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
        pnt = Point(5.23, 17.8)
        # Extent of points is just the point itself repeated.
        self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
        # Testing on the 'real world' Polygon: extent must equal the
        # min/max of the exterior ring's coordinates.
        poly = fromstr(self.geometries.polygons[3].wkt)
        ring = poly.shell
        x, y = ring.x, ring.y
        xmin, ymin = min(x), min(y)
        xmax, ymax = max(x), max(y)
        self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
    def test_pickle(self):
        "Testing pickling and unpickling support."
        # Using both pickle and cPickle -- just 'cause.
        from django.utils.six.moves import cPickle
        import pickle
        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]
        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
        # Round-trip each geometry through both picklers; geometry value
        # AND srid must survive.
        for geom in tgeoms:
            s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
            g1, g2 = cPickle.loads(s1), pickle.loads(s2)
            for tmpg in (g1, g2):
                self.assertEqual(geom, tmpg)
                self.assertEqual(geom.srid, tmpg.srid)
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
if geos_version_info()['version'] > '3.3.0':
self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
self.assertTrue(prep.disjoint(Point(-5, -5)))
poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
self.assertTrue(prep.overlaps(poly))
poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
self.assertTrue(prep.touches(poly))
poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
self.assertTrue(prep.within(poly))
# Original geometry deletion should not crash the prepared one (#21662)
del mpoly
self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
    def test_valid_reason(self):
        "Testing IsValidReason support"
        # A valid geometry reports the canonical "Valid Geometry" reason.
        g = GEOSGeometry("POINT(0 0)")
        self.assertTrue(g.valid)
        self.assertIsInstance(g.valid_reason, six.string_types)
        self.assertEqual(g.valid_reason, "Valid Geometry")
        # A degenerate linestring is invalid, with a descriptive reason.
        g = GEOSGeometry("LINESTRING(0 0, 0 0)")
        self.assertFalse(g.valid)
        self.assertIsInstance(g.valid_reason, six.string_types)
        self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
    @skipUnless(HAS_GEOS, "Geos is required.")
    def test_linearref(self):
        "Testing linear referencing"
        # project() maps a point to its distance along the line;
        # interpolate() maps a distance back to a point.  The *_normalized
        # variants use fractions of the total length instead.
        ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
        mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
        self.assertEqual(ls.project(Point(0, 20)), 10.0)
        self.assertEqual(ls.project(Point(7, 6)), 24)
        self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
        self.assertEqual(ls.interpolate(10), Point(0, 10))
        self.assertEqual(ls.interpolate(24), Point(10, 6))
        self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
        # Multilinestrings accumulate distance across their components.
        self.assertEqual(mls.project(Point(0, 20)), 10)
        self.assertEqual(mls.project(Point(7, 6)), 16)
        self.assertEqual(mls.interpolate(9), Point(0, 9))
        self.assertEqual(mls.interpolate(17), Point(10, 7))
    def test_geos_version(self):
        """Testing the GEOS version regular expression."""
        from django.contrib.gis.geos.libgeos import version_regex
        # (raw version string, expected GEOS version, expected C-API version)
        versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
                    ('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
                    ('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
                    ('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
        for v_init, v_geos, v_capi in versions:
            m = version_regex.match(v_init)
            self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
            self.assertEqual(m.group('version'), v_geos)
            self.assertEqual(m.group('capi_version'), v_capi)
| |
import json
import boto3
import sure # noqa
from moto import mock_s3, mock_cloudformation
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_basic():
    """A minimal AWS::S3::Bucket resource should create a bucket whose
    name is exposed through the Ref-based stack output."""
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {}}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    # The returned StackId was previously bound to an unused local.
    cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
    # head_bucket raises if the bucket was not actually created.
    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_with_properties():
    """An AWS::S3::Bucket with an explicit BucketName and default
    encryption should create that bucket with AES256 applied."""
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")
    # NOTE(review): real S3 rejects uppercase bucket names; moto accepts
    # this fixture, but a lowercase name would be closer to valid AWS input.
    bucket_name = "MyBucket"
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {
                    "BucketName": bucket_name,
                    "BucketEncryption": {
                        "ServerSideEncryptionConfiguration": [
                            {
                                "ServerSideEncryptionByDefault": {
                                    "SSEAlgorithm": "AES256"
                                }
                            }
                        ]
                    },
                },
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    # The StackId and stack description were previously bound to unused
    # locals; the assertions below only need the bucket name.
    cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
    s3.head_bucket(Bucket=bucket_name)
    encryption = s3.get_bucket_encryption(Bucket=bucket_name)
    encryption["ServerSideEncryptionConfiguration"]["Rules"][0][
        "ApplyServerSideEncryptionByDefault"
    ]["SSEAlgorithm"].should.equal("AES256")
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_no_interruption():
    """Adding BucketEncryption (an update with no interruption) should
    keep the same bucket and apply the new property."""
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {
                    "BucketEncryption": {
                        "ServerSideEncryptionConfiguration": [
                            {
                                "ServerSideEncryptionByDefault": {
                                    "SSEAlgorithm": "AES256"
                                }
                            }
                        ]
                    }
                },
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf.update_stack(StackName="test_stack", TemplateBody=json.dumps(template))
    # FIX: re-describe the stack after the update instead of reusing the
    # pre-update description.  The bucket name happens to be stable for a
    # no-interruption update, but stale outputs would silently mask an
    # unexpected replacement.
    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
    encryption = s3.get_bucket_encryption(
        Bucket=stack_description["Outputs"][0]["OutputValue"]
    )
    encryption["ServerSideEncryptionConfiguration"]["Rules"][0][
        "ApplyServerSideEncryptionByDefault"
    ]["SSEAlgorithm"].should.equal("AES256")
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_replacement():
    # Changing BucketName forces a resource replacement; after the update
    # the stack output must point at the (new) bucket.
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    template_json = json.dumps(template)
    cf.create_stack(StackName="test_stack", TemplateBody=template_json)
    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {"BucketName": "MyNewBucketName"},
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    template_json = json.dumps(template)
    cf.update_stack(StackName="test_stack", TemplateBody=template_json)
    # Re-describe so the output reflects the replacement bucket.
    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_outputs():
    # Verifies that Ref and the Fn::GetAtt attributes of AWS::S3::Bucket
    # (Arn, DomainName, DualStackDomainName, RegionalDomainName,
    # WebsiteURL) are all exposed as stack outputs in the expected format.
    region_name = "us-east-1"
    s3 = boto3.client("s3", region_name=region_name)
    cf = boto3.resource("cloudformation", region_name=region_name)
    stack_name = "test-stack"
    bucket_name = "test-bucket"
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "TestBucket": {
                "Type": "AWS::S3::Bucket",
                "Properties": {"BucketName": bucket_name},
            }
        },
        "Outputs": {
            "BucketARN": {
                "Value": {"Fn::GetAtt": ["TestBucket", "Arn"]},
                "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketARN"}},
            },
            "BucketDomainName": {
                "Value": {"Fn::GetAtt": ["TestBucket", "DomainName"]},
                "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketDomainName"}},
            },
            "BucketDualStackDomainName": {
                "Value": {"Fn::GetAtt": ["TestBucket", "DualStackDomainName"]},
                "Export": {
                    "Name": {"Fn::Sub": "${AWS::StackName}:BucketDualStackDomainName"}
                },
            },
            "BucketRegionalDomainName": {
                "Value": {"Fn::GetAtt": ["TestBucket", "RegionalDomainName"]},
                "Export": {
                    "Name": {"Fn::Sub": "${AWS::StackName}:BucketRegionalDomainName"}
                },
            },
            "BucketWebsiteURL": {
                "Value": {"Fn::GetAtt": ["TestBucket", "WebsiteURL"]},
                "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketWebsiteURL"}},
            },
            "BucketName": {
                "Value": {"Ref": "TestBucket"},
                "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketName"}},
            },
        },
    }
    cf.create_stack(StackName=stack_name, TemplateBody=json.dumps(template))
    outputs_list = cf.Stack(stack_name).outputs
    # Flatten [{"OutputKey": ..., "OutputValue": ...}, ...] into a dict.
    output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
    s3.head_bucket(Bucket=output["BucketName"])
    output["BucketARN"].should.match("arn:aws:s3.+{bucket}".format(bucket=bucket_name))
    output["BucketDomainName"].should.equal(
        "{bucket}.s3.amazonaws.com".format(bucket=bucket_name)
    )
    output["BucketDualStackDomainName"].should.equal(
        "{bucket}.s3.dualstack.{region}.amazonaws.com".format(
            bucket=bucket_name, region=region_name
        )
    )
    output["BucketRegionalDomainName"].should.equal(
        "{bucket}.s3.{region}.amazonaws.com".format(
            bucket=bucket_name, region=region_name
        )
    )
    output["BucketWebsiteURL"].should.equal(
        "http://{bucket}.s3-website.{region}.amazonaws.com".format(
            bucket=bucket_name, region=region_name
        )
    )
    output["BucketName"].should.equal(bucket_name)
| |
# Copyright (c) 2010, 2011 Arek Korbik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Action Message Format 0 handling library.
Implemented AMF0 -> Python type mapping:
* number -> float
* boolean -> bool
* string -> str, unicode
* object -> afm0.Object
* null -> None
* undefined -> afm0.UndefinedType
* reference -> afm0.Reference
* ecma-array -> afm0.ECMAArray
* strict-array -> list
* date -> datetime
* long string -> str, unicode
* xml document -> afm0.XMLDocument
Implemented Python -> AMF0 type mapping:
* int, long, float -> number
* bool -> boolean
* str, unicode, buffer -> string, long string
* afm0.Object -> object
* None -> null
* afm0.UndefinedType -> undefined
* afm0.Reference -> reference
* afm0.ECMAArray, dict -> ecma-array
* list, tuple -> strict-array
* datetime -> date
* afm0.XMLDocument -> xml document
"""
import calendar
import datetime
import struct
import sys
from UserDict import DictMixin
from primitives import _s_double, _s_ushort, _s_ulong_b as _s_ulong
from primitives import _s_date_tz
from vecbuf import VecBuf, VecBufEOB
# A UTC tzinfo class, temporarily here, until full support for
# timezones is implemented...
ZERO = datetime.timedelta(0)

class UTC(datetime.tzinfo):
    """Fixed tzinfo implementing UTC: zero offset and no DST."""
    def utcoffset(self, dt):
        return ZERO
    def tzname(self, dt):
        return "UTC"
    def dst(self, dt):
        return ZERO

# Shared module-level UTC instance used when decoding AMF0 dates.
utc = UTC()
# ordered dict() necessary, where should it go?...
class OrderedDict(DictMixin, object):
    """Minimal insertion-ordered mapping (pre-Python-2.7 compatible).

    Built on UserDict.DictMixin: `_keys` preserves insertion order while
    `_data` holds the actual key/value mapping.
    """
    def __init__(self, other=None, **kw):
        self._keys = []
        self._data = {}
        if other:
            self.update(other)
        self.update(**kw)
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        # Record insertion order only the first time a key appears.
        if key not in self._data:
            self._keys.append(key)
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
        self._keys.remove(key)
    def __iter__(self):
        return iter(self._keys)
    def __contains__(self, key):
        return key in self._data
    def __len__(self):
        return len(self._keys)
    # Mutable mappings are unhashable.  Python >= 2.6 honors
    # `__hash__ = None`; older versions need an explicit raising method.
    if sys.version_info < (2, 6):
        def __hash__(self):
            raise TypeError('unhashable type: %r' % self.__class__.__name__)
    else:
        __hash__ = None
    def keys(self):
        # Return a copy so callers cannot mutate the internal order list.
        return self._keys[:]
    def iteritems(self):
        return ((k, self._data.__getitem__(k)) for k in self._keys)
    def copy(self):
        # Shallow copy that preserves key order.
        d = OrderedDict()
        d._data = self._data.copy()
        d._keys = self._keys[:]
        return d
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.items())
class OrderedObject(OrderedDict):
    """Ordered mapping exposing its items as attributes.

    Attribute get/set/delete is forwarded to the underlying ordered
    storage, so `o.x = 1` is equivalent to `o['x'] = 1`.
    """
    def __init__(self, other=None, **kw):
        # overriding __init__ completely!
        # `_keys`/`_data` must be created via OrderedDict.__setattr__
        # directly -- going through our own __setattr__ would try to read
        # self._data before it exists.
        OrderedDict.__setattr__(self, '_keys', [])
        OrderedDict.__setattr__(self, '_data', {})
        if other:
            self.update(other)
        self.update(**kw)
    def __getattr__(self, name):
        # Only called for names not found normally; map misses to
        # AttributeError as attribute protocol requires.
        try:
            return self[name]
        except KeyError:
            raise AttributeError('%r object has no attribute %r' %
                                 (self.__class__.__name__, name))
    def __setattr__(self, name, value):
        # Mirrors OrderedDict.__setitem__: record order on first set.
        if name not in self._data:
            self._keys.append(name)
        self._data[name] = value
    def __delattr__(self, name):
        try:
            del self._data[name]
        except KeyError:
            raise AttributeError(name)
        self._keys.remove(name)
    def copy(self):
        return self.__class__(self)
    def s(self, **kw):
        """Convenience method for initializing/setting in order:
        o = OrderedObject(d=4).s(b=1).s(a=3).s(c=2)
        """
        self.update(**kw)
        return self
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join(('%s=%r' % elt) for elt in self.items()))
class DecoderError(ValueError):
    """Raised when an AMF0 byte stream cannot be decoded."""
    pass
class EncoderError(ValueError):
    """Raised when a Python value cannot be encoded as AMF0."""
    pass
class UndefinedType(object):
    """Sentinel type for the AMF0 ``undefined`` value.

    Every instance compares equal to every other instance and all share
    one hash value, so the type behaves like a single marker value.
    """
    def __eq__(self, other):
        # Any two UndefinedType instances represent the same value.
        return isinstance(other, UndefinedType)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Constant hash, consistent with all instances being "equal".
        return 0
    def __repr__(self):
        return 'undefined'

# Canonical shared instance.
undefined = UndefinedType()
class Object(OrderedObject):
    """AMF Object class, with chronological attribute ordering, for
    increased compatibility.

    Decoded AMF0 anonymous objects are returned as instances of this
    class; the encoder maps it back to the object marker.
    """
class Reference(object):
    """AMF0 reference marker: a 16-bit index into the table of
    previously transmitted complex objects."""
    def __init__(self, index):
        self.index = index
    def __eq__(self, other):
        if not isinstance(other, Reference):
            return False
        return self.index == other.index
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Equal references (same index) must hash identically.
        return self.index
    def __repr__(self):
        return '%s(%d)' % (self.__class__.__name__, self.index)
class ECMAArray(OrderedDict):
    # Distinct subclass so the encoder can map values back to the AMF0
    # ecma-array marker (plain dicts encode the same way).
    pass
class XMLDocument(unicode):
    # Marker subclass of unicode so XML payloads round-trip to the AMF0
    # xml-document marker rather than a plain string.
    pass
##
# decoder part
#
def _decode_marker(s):
    # Read one byte: the AMF0 type marker for the next value.
    return ord(s.read(1)[0])
def _decode_number(s):
    # AMF0 number: 8-byte big-endian IEEE-754 double.
    return _s_double.unpack(s.read(8))[0]
def _decode_boolean(s):
    # AMF0 boolean: one byte; any non-zero value is True.
    return s.read(1)[0] != '\x00'
def _decode_any_string(s, unpacker):
    # Read a length-prefixed string; `unpacker` fixes the prefix width
    # (ushort for short strings, ulong for long strings).
    str_len, = unpacker.unpack(s.read(unpacker.size))
    read = s.read(str_len)
    try:
        # Python 2 idiom: .encode('ascii') on a byte string implicitly
        # decodes it as ASCII first, so non-ASCII payloads raise
        # UnicodeDecodeError here and fall through to UTF-8 below.
        return str(read).encode('ascii')
    except UnicodeDecodeError:
        pass
    try:
        return unicode(read, 'utf-8')
    except UnicodeDecodeError:
        raise DecoderError('Invalid string encoding')
def _decode_string(s):
    # Short string: 16-bit big-endian length prefix.
    return _decode_any_string(s, _s_ushort)
def _decode_object_like(s, setter):
    # Shared reader for object and ecma-array bodies: a sequence of
    # (name, value) pairs terminated by an empty name followed by the
    # object-end marker.  Each pair is handed to `setter`.
    while 1:
        name = _decode_string(s)
        if name == '':
            if _decode_marker(s) != MARK_OBJECT_END:
                raise DecoderError('Missing object end marker')
            break
        value = _decode_single(s, type_dict=object_decoders)
        setter(name, value)
def _decode_object(s):
    # Anonymous AMF0 object -> Object with attributes in wire order.
    ret = Object()
    _decode_object_like(s, lambda k, v: setattr(ret, k, v))
    return ret
def _decode_null(s):
    # AMF0 null has no payload.
    return None
def _decode_undefined(s):
    # AMF0 undefined has no payload; decoded to the shared sentinel.
    return undefined
def _decode_reference(s):
    # AMF0 reference: 16-bit index into the sender's object table.
    return Reference(_s_ushort.unpack(s.read(2))[0])
def _decode_ecma_array(s):
    # The 32-bit count prefix is advisory; actual contents are delimited
    # like an object body, so the prefix is skipped.
    s.read(4) # skip unused(?) length
    ret = ECMAArray()
    _decode_object_like(s, lambda k, v: ret.__setitem__(k, v))
    return ret
def _decode_strict_array(s):
    # Dense array: 32-bit element count, then that many full values.
    length, = _s_ulong.unpack(s.read(4))
    return [_decode_single(s) for _ in xrange(length)]
def _decode_date(s):
    # AMF0 date: millisecond epoch timestamp (double) plus a 16-bit
    # timezone offset, which is currently discarded.
    # TODO: don't ignore timezone
    # FIXME
    milliseconds, tz = _s_date_tz.unpack(s.read(10))
    return datetime.datetime.fromtimestamp(milliseconds / 1000.0, utc)
def _decode_long_string(s):
    # Long string: 32-bit big-endian length prefix.
    return _decode_any_string(s, _s_ulong)
def _decode_xml_document(s):
    # XML document payload is encoded like a long string.
    return XMLDocument(_decode_long_string(s))
def _decode_typed_object(s):
    # Recognized marker, deliberately not implemented yet.
    raise DecoderError('Typed objects unsupported a.t.m.')
def _decode_unsupported(s):
    # The wire-level "unsupported" marker is never decodable.
    raise DecoderError('Unsupported unsupported')
# AMF0 type markers in wire order (0x00 .. 0x11).
(MARK_NUMBER, MARK_BOOL, MARK_STRING, MARK_OBJECT, MARK_MOVIECLIP, MARK_NULL,
 MARK_UNDEFINED, MARK_REFERENCE, MARK_ECMA_ARRAY, MARK_OBJECT_END,
 MARK_STRICT_ARRAY, MARK_DATE, MARK_LONG_STRING, MARK_UNSUPPORTED,
 MARK_RECORDSET, MARK_XML_DOCUMENT, MARK_TYPED_OBJECT,
 MARK_AVMPLUS_OBJECT) = range(0x12)
# Marker -> decoder dispatch for values that may appear inside object
# bodies (notably excludes the "unsupported" marker).
object_decoders = {
    MARK_NUMBER: _decode_number,
    MARK_BOOL: _decode_boolean,
    MARK_STRING: _decode_string,
    MARK_OBJECT: _decode_object,
    MARK_NULL: _decode_null,
    MARK_UNDEFINED: _decode_undefined,
    MARK_REFERENCE: _decode_reference,
    MARK_ECMA_ARRAY: _decode_ecma_array,
    MARK_STRICT_ARRAY: _decode_strict_array,
    MARK_DATE: _decode_date,
    MARK_LONG_STRING: _decode_long_string,
    MARK_XML_DOCUMENT: _decode_xml_document,
    MARK_TYPED_OBJECT: _decode_typed_object,
}
# def _debug_wrapper(f):
# def _wrap(*a, **kw):
# print '(%s) calling %s(%r, %r)' % (len(a[0]), f.__name__, a, kw)
# ret = f(*a, **kw)
# print '(%s) got: %r' % (len(a[0]), ret)
# return ret
# return _wrap
# object_decoders = dict((k, _debug_wrapper(f))
# for (k, f) in object_decoders.items())
# Top-level decoder table: everything object bodies accept, plus markers
# that are recognized but raise as unsupported.
decoders = object_decoders.copy()
decoders.update({
    # MARK_MOVIECLIP: _decode_movieclip,
    MARK_UNSUPPORTED: _decode_unsupported,
    # MARK_RECORDSET: _decode_recordset,
    # MARK_AVMPLUS_OBJECT: _decode_avmplus_object,
})
def _decode_single(s, type_dict=decoders):
    """Decode exactly one AMF0 value from `s` using `type_dict` for
    marker dispatch."""
    # EOB on the marker read is intentionally not caught: the caller
    # requested exactly one value, so a truncated stream is an error.
    marker = _decode_marker(s)
    try:
        decoder = type_dict[marker]
    except KeyError:
        raise DecoderError('Unsupported marker 0x%02x' % marker)
    return decoder(s)
def _decode(s, type_dict=decoders):
    # Decode values until the buffer is exhausted.  EOB while reading a
    # *marker* means a clean end of input; EOB anywhere else propagates
    # to the caller as an incomplete-data error.
    values = []
    while 1:
        try:
            marker = _decode_marker(s)
        except VecBufEOB:
            break
        decoder = type_dict.get(marker, None)
        if not decoder:
            raise DecoderError('Unsupported marker 0x%02x' % marker)
        values.append(decoder(s))
    return values
##
# encoder part
#
# the following structs are not generic enough to be put in primitives...
# Pre-compiled big-endian marker+payload formats.  These are specific to
# AMF0 framing, hence defined here rather than in primitives.
_s_m_empty = struct.Struct('>B')
_s_m_len = struct.Struct('>BH')
_s_m_longlen = struct.Struct('>BL')
_s_m_double = struct.Struct('>Bd')
_s_m_boolean = struct.Struct('>BB')
_s_m_reference = _s_m_len
_s_m_time_tz = struct.Struct('>Bdh')
_s_endmarker = struct.Struct('>HB')
def _encode_number(s, value):
    # AMF0 number: marker + big-endian IEEE-754 double.
    s.write(_s_m_double.pack(MARK_NUMBER, value))
def _encode_boolean(s, value):
    # AMF0 boolean: marker + single 0/1 byte.
    s.write(_s_m_boolean.pack(MARK_BOOL, 1 if value else 0))
def _encode_string(s, value):
    # Strings longer than 0xffff bytes need the long-string marker with
    # its 32-bit length prefix; shorter ones use the 16-bit prefix.
    length = len(value)
    if length > 0xffff:
        s.write(_s_m_longlen.pack(MARK_LONG_STRING, length))
    else:
        s.write(_s_m_len.pack(MARK_STRING, length))
    s.write(value)
def _encode_unicode(s, value):
    # Unicode values are serialized as UTF-8 byte strings.
    _encode_string(s, value.encode('utf-8'))
def _encode_null(s, _value):
    # AMF0 null: marker only, no payload.
    s.write(_s_m_empty.pack(MARK_NULL))
def _encode_undefined(s, _value):
    # AMF0 undefined: marker only, no payload.
    s.write(_s_m_empty.pack(MARK_UNDEFINED))
def _encode_reference(s, value):
    # Reference indices are limited to 16 bits on the wire.
    if 0 <= value.index <= 0xffff:
        s.write(_s_m_reference.pack(MARK_REFERENCE, value.index))
        return
    raise EncoderError('Reference not in range [0; 65535]')
def _encode_strict_array(s, value):
    # Dense array: 32-bit element count, then each element in full.
    length = len(value)
    if not (0 <= length <= 0xffffffff):
        raise EncoderError('Sequence too long')
    s.write(_s_m_longlen.pack(MARK_STRICT_ARRAY, length))
    for elt in value:
        _encode_single(s, elt)
def _encode_date(s, value):
    # FIXME
    # Timestamp in milliseconds; timezone offset is always written as 0
    # (value is converted to UTC via utctimetuple first).
    seconds = calendar.timegm(value.utctimetuple())
    s.write(_s_m_time_tz.pack(MARK_DATE, int(seconds * 1000), 0))
def _encode_xml_document(s, value):
    # XML documents are written like long strings, UTF-8 encoded.
    string = value.encode('utf-8')
    s.write(_s_m_longlen.pack(MARK_XML_DOCUMENT, len(string)))
    s.write(string)
def _encode_property_name(s, value):
    # Property names carry a 16-bit length prefix but no type marker.
    s.write(_s_ushort.pack(len(value)))
    s.write(value)
def _encode_object_like_content(s, value):
    # Shared writer for object and ecma-array bodies: (name, value)
    # pairs followed by an empty name and the object-end marker.
    for k, v in value.iteritems():
        _encode_property_name(s, k)
        _encode_single(s, v, type_dict=object_encoders)
    s.write(_s_endmarker.pack(0, MARK_OBJECT_END))
def _encode_ecma_array(s, value):
    # Ecma-array: advisory 32-bit count, then object-like contents.
    s.write(_s_m_longlen.pack(MARK_ECMA_ARRAY, len(value)))
    _encode_object_like_content(s, value)
def _encode_object(s, value):
    # Anonymous object: marker, then object-like contents.
    s.write(_s_m_empty.pack(MARK_OBJECT))
    _encode_object_like_content(s, value)
# Python type -> encoder dispatch.  NOTE: _encode_single looks up
# value.__class__ exactly (no isinstance), so subclasses must be listed
# explicitly (e.g. Object alongside dict-likes).
object_encoders = {
    float: _encode_number,
    int: _encode_number,
    long: _encode_number,
    bool: _encode_boolean,
    str: _encode_string,
    buffer: _encode_string,
    unicode: _encode_unicode,
    Object: _encode_object,
    None.__class__: _encode_null,
    UndefinedType: _encode_undefined,
    Reference: _encode_reference,
    ECMAArray: _encode_ecma_array,
    dict: _encode_ecma_array,
    list: _encode_strict_array,
    tuple: _encode_strict_array,
    datetime.datetime: _encode_date,
    XMLDocument: _encode_xml_document,
}
# NOTE(review): unlike `decoders` (a copy of object_decoders), this is a
# plain alias of object_encoders -- confirm intentional before mutating.
encoders = object_encoders
def _encode_single(s, value, type_dict=encoders):
    """Encode one Python value onto the stream `s`, dispatching on the
    value's exact class via `type_dict`."""
    try:
        value_class = value.__class__
    except AttributeError:
        raise EncoderError("Unable to encode values of type %r" % type(value))
    # Exact-class lookup (deliberately not isinstance-based).
    encoder = type_dict.get(value_class)
    if encoder is None:
        raise EncoderError('No encoder for values of type %r' % value_class)
    encoder(s, value)
def _encode(s, values, type_dict=encoders):
    # Encode each value in sequence onto the stream.
    for v in values:
        _encode_single(s, v, type_dict=type_dict)
def _encode_variable_name(s, name):
    # FLV variable names must be str/buffer (written as-is) or unicode
    # (written as UTF-8), and fit in a 16-bit length prefix.
    try:
        name_class = name.__class__
    except AttributeError:
        name_class = None
    value = name
    if name_class == unicode:
        value = name.encode('utf-8')
    elif name_class not in (str, buffer):
        raise EncoderError("Not an acceptable variable name type %r" %
                           type(name))
    if len(value) > 0xffff:
        raise EncoderError("Variable name too long")
    _encode_property_name(s, value)
##
# public interface
#
def decode(data):
    """Decode AMF0-encoded buffer of data.

    @type data: VecBuf
    @returns: list of objects
    @raises DecoderError: on truncated or malformed input
    """
    try:
        return _decode(data)
    except VecBufEOB:
        raise DecoderError('Incomplete encoded data')
def decode_one(data):
    """Decode a single value from AMF0-encoded buffer of data.

    @type data: VecBuf
    @returns: object
    @raises DecoderError: on truncated or malformed input
    """
    try:
        return _decode_single(data)
    except VecBufEOB:
        raise DecoderError('Incomplete encoded data')
def encode(*args):
    """Encode given values using AMF0.

    @rtype: VecBuf
    @raises EncoderError: if any value has no registered encoder
    """
    vb = VecBuf()
    _encode(vb, args)
    return vb
def decode_variable(data):
    """Decode a single FLV data variable from AMF0-encoded buffer of data.

    A variable is a bare (unmarked) name string followed by one value.

    @type data: VecBuf
    @returns: (str, object)
    @raises DecoderError: on truncated or malformed input
    """
    try:
        return _decode_string(data), _decode_single(data)
    except VecBufEOB:
        raise DecoderError('Incomplete encoded data')
def encode_variable(name, value):
    """Encode given name and value into an FLV data variable using AMF0.

    @type name: str or unicode
    @rtype: VecBuf
    @raises EncoderError: on an unencodable name or value
    """
    vb = VecBuf()
    _encode_variable_name(vb, name)
    _encode_single(vb, value)
    return vb
# Public API of this module.
__all__ = ['encode', 'decode', 'encode_variable', 'decode_variable',
           'DecoderError', 'EncoderError',
           'ECMAArray', 'Object', 'undefined', 'XMLDocument']
| |
"""Inference in propositional logic"""
from __future__ import print_function, division
from sympy.logic.boolalg import And, Not, conjuncts, to_cnf
from sympy.core.compatibility import ordered
from sympy.core.sympify import sympify
from sympy.external.importtools import import_module
def literal_symbol(literal):
    """
    The symbol in this literal (without the negation).

    Examples
    ========

    >>> from sympy.abc import A
    >>> from sympy.logic.inference import literal_symbol
    >>> literal_symbol(A)
    A
    >>> literal_symbol(~A)
    A

    """
    # True/False are passed through unchanged.
    if literal is True or literal is False:
        return literal
    try:
        if literal.is_Symbol:
            return literal
        if literal.is_Not:
            # Strip the negation and recurse on the inner expression.
            return literal_symbol(literal.args[0])
        else:
            raise ValueError
    except (AttributeError, ValueError):
        # Non-sympy objects (AttributeError) and non-literal boolean
        # expressions (ValueError) both get the same error.
        raise ValueError("Argument must be a boolean literal.")
def satisfiable(expr, algorithm=None, all_models=False):
    """
    Check satisfiability of a propositional sentence.
    Returns a model when it succeeds.
    Returns {true: true} for trivially true expressions.

    On setting all_models to True, if given expr is satisfiable then
    returns a generator of models. However, if expr is unsatisfiable
    then returns a generator containing the single element False.

    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.inference import satisfiable
    >>> satisfiable(A & ~B)
    {A: True, B: False}
    >>> satisfiable(A & ~A)
    False
    >>> satisfiable(True)
    {True: True}
    >>> next(satisfiable(A & ~A, all_models=True))
    False
    >>> models = satisfiable((A >> B) & B, all_models=True)
    >>> next(models)
    {A: False, B: True}
    >>> next(models)
    {A: True, B: True}
    >>> def use_models(models):
    ...     for model in models:
    ...         if model:
    ...             # Do something with the model.
    ...             print(model)
    ...         else:
    ...             # Given expr is unsatisfiable.
    ...             print("UNSAT")
    >>> use_models(satisfiable(A >> ~A, all_models=True))
    {A: False}
    >>> use_models(satisfiable(A ^ A, all_models=True))
    UNSAT

    """
    # Default algorithm selection: prefer the C-backed pycosat solver
    # when importable, otherwise fall back to the pure-Python dpll2.
    if algorithm is None or algorithm == "pycosat":
        pycosat = import_module('pycosat')
        if pycosat is not None:
            algorithm = "pycosat"
        else:
            if algorithm == "pycosat":
                # Explicitly requested but not installed: that's an error.
                raise ImportError("pycosat module is not present")
            # Silently fall back to dpll2 if pycosat
            # is not installed
            algorithm = "dpll2"
    if algorithm == "dpll":
        from sympy.logic.algorithms.dpll import dpll_satisfiable
        return dpll_satisfiable(expr)
    elif algorithm == "dpll2":
        from sympy.logic.algorithms.dpll2 import dpll_satisfiable
        return dpll_satisfiable(expr, all_models)
    elif algorithm == "pycosat":
        from sympy.logic.algorithms.pycosat_wrapper import pycosat_satisfiable
        return pycosat_satisfiable(expr, all_models)
    raise NotImplementedError
def valid(expr):
    """
    Check validity of a propositional sentence.
    A valid propositional sentence is True under every assignment.

    Examples
    ========

    >>> from sympy.abc import A, B
    >>> from sympy.logic.inference import valid
    >>> valid(A | ~A)
    True
    >>> valid(A | B)
    False

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Validity

    """
    # expr is valid iff its negation has no satisfying assignment.
    return not satisfiable(Not(expr))
def pl_true(expr, model=None, deep=False):
    """
    Returns whether the given assignment is a model or not.

    If the assignment does not specify the value for every proposition,
    this may return None to indicate 'not obvious'.

    Parameters
    ==========

    model : dict, optional, default: {}
        Mapping of symbols to boolean values to indicate assignment.
    deep: boolean, optional, default: False
        Gives the value of the expression under partial assignments
        correctly. May still return None to indicate 'not obvious'.

    Examples
    ========

    >>> from sympy.abc import A, B, C
    >>> from sympy.logic.inference import pl_true
    >>> pl_true( A & B, {A: True, B: True})
    True
    >>> pl_true(A & B, {A: False})
    False
    >>> pl_true(A & B, {A: True})
    >>> pl_true(A & B, {A: True}, deep=True)
    >>> pl_true(A >> (B >> A))
    >>> pl_true(A >> (B >> A), deep=True)
    True
    >>> pl_true(A & ~A)
    >>> pl_true(A & ~A, deep=True)
    False
    >>> pl_true(A & B & (~A | ~B), {A: True})
    >>> pl_true(A & B & (~A | ~B), {A: True}, deep=True)
    False

    """
    from sympy.core.symbol import Symbol
    from sympy.logic.boolalg import BooleanFunction
    boolean = (True, False)

    def _validate(expr):
        # Accept only symbols, True/False, and boolean functions thereof.
        if isinstance(expr, Symbol) or expr in boolean:
            return True
        if not isinstance(expr, BooleanFunction):
            return False
        return all(_validate(arg) for arg in expr.args)

    # FIX: replace the mutable default `model={}` with a None sentinel
    # (behavior is unchanged; the old default was never mutated, but a
    # mutable default argument is an anti-pattern).
    if model is None:
        model = {}
    if expr in boolean:
        return expr
    expr = sympify(expr)
    if not _validate(expr):
        raise ValueError("%s is not a valid boolean expression" % expr)
    # Drop assignments to non-boolean values before substituting.
    model = dict((k, v) for k, v in model.items() if v in boolean)
    result = expr.subs(model)
    if result in boolean:
        return bool(result)
    if deep:
        # Residual expression: decide via validity/satisfiability checks.
        model = dict((k, True) for k in result.atoms())
        if pl_true(result, model):
            if valid(result):
                return True
        else:
            if not satisfiable(result):
                return False
    return None
def entails(expr, formula_set=None):
    """
    Check whether the given expr_set entail an expr.
    If formula_set is empty then it returns the validity of expr.

    Examples
    ========

    >>> from sympy.abc import A, B, C
    >>> from sympy.logic.inference import entails
    >>> entails(A, [A >> B, B >> C])
    False
    >>> entails(C, [A >> B, B >> C, A])
    True
    >>> entails(A >> B)
    False
    >>> entails(A >> (B >> A))
    True

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Logical_consequence

    """
    # FIX: replace the mutable default `formula_set={}` (an empty *dict*
    # used as a sequence) with a None sentinel.  Passing any iterable of
    # formulas still works exactly as before.
    if formula_set is None:
        formula_set = []
    else:
        formula_set = list(formula_set)
    # expr is entailed iff formula_set + {~expr} is unsatisfiable.
    formula_set.append(Not(expr))
    return not satisfiable(And(*formula_set))
class KB(object):
    """Base class for all knowledge bases.

    Subclasses must implement `tell`, `ask` and `retract`.  Clauses are
    held in the unordered set `clauses_`; the `clauses` property exposes
    a canonically ordered list view.
    """
    def __init__(self, sentence=None):
        # Set of CNF clauses known to the KB.
        self.clauses_ = set()
        if sentence:
            self.tell(sentence)

    def tell(self, sentence):
        # Add a sentence's clauses to the KB (abstract).
        raise NotImplementedError

    def ask(self, query):
        # Query the KB (abstract).
        raise NotImplementedError

    def retract(self, sentence):
        # Remove a sentence's clauses from the KB (abstract).
        raise NotImplementedError

    @property
    def clauses(self):
        return list(ordered(self.clauses_))
class PropKB(KB):
    """A KB for Propositional Logic. Inefficient, with no indexing."""

    def tell(self, sentence):
        """Add the sentence's clauses to the KB

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.clauses
        []

        >>> l.tell(x | y)
        >>> l.clauses
        [x | y]

        >>> l.tell(y)
        >>> l.clauses
        [y, x | y]

        """
        # Store the sentence as individual CNF conjuncts.
        for c in conjuncts(to_cnf(sentence)):
            self.clauses_.add(c)

    def ask(self, query):
        """Checks if the query is true given the set of clauses.

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.tell(x & ~y)
        >>> l.ask(x)
        True
        >>> l.ask(y)
        False

        """
        return entails(query, self.clauses_)

    def retract(self, sentence):
        """Remove the sentence's clauses from the KB

        Examples
        ========

        >>> from sympy.logic.inference import PropKB
        >>> from sympy.abc import x, y
        >>> l = PropKB()
        >>> l.clauses
        []

        >>> l.tell(x | y)
        >>> l.clauses
        [x | y]

        >>> l.retract(x | y)
        >>> l.clauses
        []

        """
        # Remove exactly the conjuncts this sentence would have added;
        # discard() tolerates clauses that were never present.
        for c in conjuncts(to_cnf(sentence)):
            self.clauses_.discard(c)
| |
# -*- coding: utf-8 -*-
#
# A way to transform a Fasta file quickly
#
import copy
import mmap
import multiprocessing
import os
import sys
import time
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import pysam
from . import exceptions
from . import fasta
from . import fasta_patch
from . import g2g
from . import g2g_utils
from . import vci
LOG = g2g.get_logger()
###########
VCI_FILE = None
###########
class FastaTransformParams(object):
    """Per-region parameter bundle handed to a fasta-transform worker."""

    def __init__(self):
        # Region to extract; must match the input fasta file's naming:
        # chr:start-end for haploid files, chr(_L|_R):start-end for diploid.
        self.input_region = None
        # Original input fasta file.
        self.input_file = None
        # Scratch directory for intermediate files.
        self.temp_dir = None
        # True when the whole sequence (not a sub-region) is transformed.
        self.full_file = False
        self.output_file = None
        self.output_region = None
        self.output_header = None
        # VCI information: file plus the region string used to query it.
        self.vci_file = None
        self.vci_query = None
        # Apply the VCI mappings in the reverse direction when True.
        self.reverse = False
        self.gen_temp = True
        # 0-based offset of the region start within the source sequence.
        self.offset = 0
        # Run the SNP patch step before transforming when True.
        self.patch = False

    def __str__(self):
        template = "Input: {}\n\tOutput: {}\n\tLocation: {}\n\tOffset: {}\n\tOutput Region: {}\n\tOutput Header: {}"
        return template.format(self.input_file, self.output_file,
                               self.input_region, self.offset,
                               self.output_region, self.output_header)
class FastaTransformResult(object):
    """Output location and variant tallies produced by one transform worker."""

    def __init__(self):
        self.output_file = None
        # Counts of variants applied while building the output sequence.
        self.snp_count = 0
        self.ins_count = 0
        self.del_count = 0

    def __str__(self):
        return "File: {}".format(self.output_file)
#@profile
def process_piece(fasta_transform_params):
    """
    Transform one fasta region according to a VCI file.

    Extracts the input region into a temporary fasta file, optionally runs
    the SNP patch step first, then walks the VCI records for the region and
    rebuilds the sequence, applying insertions and skipping deletions.

    :param fasta_transform_params: a FastaTransformParams instance
    :return: a FastaTransformResult with the output file and variant counts
    """
    # todo: check with patch and make sure that we are using the same type of info
    start_time = time.time()
    LOG.info(fasta_transform_params)
    fasta_transform_result = FastaTransformResult()
    fasta_transform_result.output_file = fasta_transform_params.output_file
    try:
        fasta_file = fasta.FastaFile(fasta_transform_params.input_file)
        #vci_file = vci.VCIFile(fasta_transform_params.vci_file, seq_ids=[fasta_transform_params.output_region.seq_id], reverse=fasta_transform_params.reverse)
        #vci_file.parse(reverse=fasta_transform_params.reverse)
        LOG.debug('fasta_transform_params.input_region={}'.format(fasta_transform_params.input_region))
        LOG.debug('fasta_transform_params.input_file={}'.format(fasta_transform_params.input_file))
        LOG.debug('fasta_transform_params.output_file={}'.format(fasta_transform_params.output_file))
        LOG.debug('fasta_transform_params.output_region={}'.format(fasta_transform_params.output_region))
        LOG.debug('fasta_transform_params.output_header={}'.format(fasta_transform_params.output_header))
        LOG.debug('fasta_transform_params.vci_query={}'.format(fasta_transform_params.vci_query))
        # Dump the requested region into a standalone temp fasta so later
        # fetches are relative to this slice (see the offset arithmetic below).
        tmp_fasta = g2g_utils.gen_file_name(prefix=g2g_utils.location_to_filestring(fasta_transform_params.input_region) + '_', extension="fa", append_time=False, output_dir=fasta_transform_params.temp_dir)
        LOG.debug('tmp_fasta={}'.format(tmp_fasta))
        working = open(tmp_fasta, "w")
        working.write(">{}\n".format(fasta_transform_params.input_region.seq_id))
        LOG.debug("Fasta Fetch {}".format(fasta_transform_params.input_region))
        # pysam-style fetch: start is converted from 1-based to 0-based here.
        sequence = fasta_file.fetch(fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start - 1, fasta_transform_params.input_region.end)
        if len(sequence) < 50:
            LOG.debug("Fasta Fetch = {}".format(sequence))
        else:
            LOG.debug("Fasta Fetch = {} ... {}".format(sequence[:25], sequence[-25:]))
        g2g_utils.write_sequence(sequence, working)
        working.close()
        if fasta_transform_params.patch:
            LOG.info("#########################################################################")
            LOG.info("############################ PATCHING ##################################")
            LOG.info("#########################################################################")
            # Run the SNP patch step over the temp fasta in place, then
            # continue the transform from the patched file.
            p = fasta_patch.FastaPatchParams()
            p.input_region = fasta_transform_params.input_region
            # original input fasta file
            p.input_file = tmp_fasta
            # temporary directory
            p.temp_dir = fasta_transform_params.temp_dir
            p.output_file = tmp_fasta
            p.output_region = fasta_transform_params.input_region
            p.output_header = fasta.FastaHeader("{}".format(fasta_transform_params.input_region.seq_id), "{} {}:{}-{}".format(fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start, fasta_transform_params.input_region.end))
            # vci information
            p.vci_file = fasta_transform_params.vci_file
            p.vci_query = fasta_transform_params.vci_query
            p.reverse = fasta_transform_params.reverse
            p.gen_temp = False
            # offset
            p.offset = fasta_transform_params.offset
            p1 = fasta_patch.process_piece(p)
            LOG.debug('\n\n\npatch output = {}\n\n\n'.format(p1.output_file))
            fasta_file = fasta.FastaFile(p1.output_file)
            fasta_transform_result.snp_count = p1.count
        else:
            # No patching: transform directly from the extracted slice.
            fasta_file = fasta.FastaFile(tmp_fasta)
        offset = fasta_transform_params.offset
        reverse = fasta_transform_params.reverse
        #offset = 0
        LOG.info("Transforming {}...".format(fasta_transform_params.input_region))
        region = g2g.parse_region(fasta_transform_params.vci_query)
        LOG.info("Finding VCI mappings for {}".format(region))
        # VCI_FILE is a module-level global parsed once in process() and
        # shared with the pool workers.
        global VCI_FILE
        mappings = VCI_FILE.find_mappings(region.seq_id, fasta_transform_params.output_region.start-1, fasta_transform_params.output_region.end)
        fasta_out = open(fasta_transform_params.output_file, "w")
        if mappings is None:
            # No mappings: the region has no equivalent in the target; dump
            # the unmodified source sequence with a descriptive header.
            LOG.info("This region was deleted")
            LOG.info("TODO: dump the fasta sequence here")
            LOG.info("fasta_transform_params.output_region.seq_id={}".format(fasta_transform_params.output_region.seq_id))
            LOG.info("fasta_transform_params.output_file={}".format(fasta_transform_params.output_file))
            if fasta_transform_params.full_file:
                out_header = ">{} {}:{}-{} from|{}:{}-{}\n".format(fasta_transform_params.output_region.seq_id, fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start, fasta_transform_params.input_region.end, fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start, fasta_transform_params.input_region.end)
            else:
                out_header = ">{}:{}-{} from|{}:{}-{}\n".format(fasta_transform_params.output_region.seq_id, fasta_transform_params.input_region.start, fasta_transform_params.input_region.end, fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start, fasta_transform_params.input_region.end)
            fasta_out.write(out_header)
            partial_seq = fasta_file.fetch(fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start-1, fasta_transform_params.input_region.end)
            if len(partial_seq) < 50:
                LOG.debug("Fasta Fetch = {}".format(partial_seq))
            else:
                LOG.debug("Fasta Fetch = {} ... {}".format(partial_seq[:25], partial_seq[-25:]))
            g2g_utils.write_sequence(partial_seq, fasta_out)
            return fasta_transform_result
        first = True
        fasta_transform_result.ins_count = 0
        fasta_transform_result.del_count = 0
        new_start_pos = mappings[0].to_start
        new_end_pos = mappings[-1].to_end
        LOG.debug("new_start_pos={}".format(new_start_pos))
        last_pos = 0
        new_sequence = StringIO()
        #LOG.debug("index of '>' is {}".format(new_sequence.getvalue().find('>')))
        start = mappings[0].from_start
        LOG.debug("Setting start to {} (mappings[0].from_start)".format(start))
        found = False
        new_sequence_len = 0
        LOG.debug('start={}'.format(start))
        LOG.debug('last_pos={}'.format(last_pos))
        LOG.debug("VCI Fetch {}".format(fasta_transform_params.vci_query))
        # Walk each VCI record in the region, copying unchanged fragments and
        # applying insertions/deletions as they are encountered.
        local_vci_file = vci.VCIFile(fasta_transform_params.vci_file)
        for line in local_vci_file.fetch(fasta_transform_params.vci_query, parser=pysam.asTuple()):
            aline = line
            # '.' in column 5 marks a record with no fragment size; skip it.
            if line[5] == '.':
                continue
            found = True
            LOG.debug('')
            LOG.debug("LINE: {}".format(line))
            #new_sequence_value = new_sequence.getvalue()
            #if len(new_sequence_value) > 50:
            #    LOG.debug('current={}...{}'.format(new_sequence_value[:25], new_sequence_value[-25:]))
            #else:
            #    LOG.debug('current={}'.format(new_sequence_value))
            # chromosome, position, shared_bases, deleted_bases, inserted_bases, fragment_size
            shared_bases = line[2]
            # In reverse mode the deleted/inserted columns swap roles.
            deleted_bases = line[3 if not reverse else 4]
            deleted_bases_length = len(deleted_bases) if deleted_bases != '.' else 0
            inserted_bases = line[4 if not reverse else 3]
            inserted_bases_length = len(inserted_bases) if inserted_bases != '.' else 0
            fragment_size = int(line[5])
            if first:
                # fragment_size = (current_pos + shared_bases_length) -
                #                 (previous_pos + previous_shared_length + previous_inserted_length)
                LOG.debug('First result in query...')
                LOG.debug("Adjusting last_pos from {} to {}".format(last_pos, start))
                last_pos = start
                LOG.debug("Adjusting fragment_size from {} to {}".format(fragment_size, (int(line[1]) + len(shared_bases)) - (last_pos + 1 + 0)))
                fragment_size = (int(line[1]) + len(shared_bases)) - (last_pos + 1 + 0)
                first = False
            if fragment_size < 0:
                continue
            LOG.debug('last_pos={}'.format(last_pos))
            LOG.debug('offset={}'.format(offset))
            LOG.debug('fragment_size={}'.format(fragment_size))
            #LOG.debug("Fasta Fetch {}:{}-{} (0-based)".format(fasta_transform_params.input_region.seq_id, last_pos - offset, last_pos + fragment_size - offset))
            LOG.debug('extracting... {}-{}'.format(last_pos - offset, last_pos + fragment_size - offset))
            # Offset converts absolute coordinates into coordinates within
            # the extracted temp fasta slice.
            partial_seq = fasta_file.fetch(fasta_transform_params.input_region.seq_id, last_pos - offset, last_pos + fragment_size - offset)
            if len(partial_seq) < 50:
                LOG.debug("Fasta Fetch = {}".format(partial_seq))
            else:
                LOG.debug("Fasta Fetch = {} ... {}".format(partial_seq[:25], partial_seq[-25:]))
            new_sequence.write(partial_seq)
            new_sequence_len += fragment_size
            #LOG.debug("index of '>' is {}".format(new_sequence.getvalue().find('>')))
            if inserted_bases_length > 0:
                # insertion
                LOG.debug("INSERTION")
                new_sequence.write(inserted_bases)
                #LOG.debug("index of '>' is {}".format(new_sequence.getvalue().find('>')))
                #LOG.debug("{0}:{1}-{2} (Length: {3})".format(location.seqid, last_pos, last_pos + fragment_size, len(partial_seq)))
                #if len(partial_seq) > 100:
                #    LOG.debug("{0}...{1}".format(partial_seq[:10], partial_seq[-10:]))
                #else:
                #    LOG.debug(partial_seq)
                LOG.debug("Adding {0}".format(inserted_bases))
                #LOG.debug("SAME={0}, {1}".format(shared_bases, partial_seq[-(len(shared_bases)):]))
                fasta_transform_result.ins_count += inserted_bases_length
                new_sequence_len += inserted_bases_length
            if deleted_bases_length > 0:
                # deletion
                LOG.debug("DELETION")
                # Skip the deleted source bases rather than copying them.
                last_pos += deleted_bases_length
                #LOG.debug("skipping ahead {0} bases".format(deleted_bases_length))
                fasta_transform_result.del_count += deleted_bases_length
            #LOG.debug("last_pos incremented by fragment_size, {} to {}".format(last_pos, last_pos + fragment_size))
            last_pos += fragment_size
            #LOG.debug("LAST_POS={0}, INSERTIONS={1}, DELETIONS={2}, DIFF={3}".format(last_pos, fasta_transform_result.ins_count, fasta_transform_result.del_count, (fasta_transform_result.ins_count - fasta_transform_result.del_count)))
        if found:
            # Copy whatever remains of the region after the last VCI record.
            LOG.debug("Fetching last bit of sequence")
            if last_pos >= fasta_transform_params.input_region.end:
                LOG.debug("Nothing to fetch.. done")
            else:
                LOG.debug("Fasta Fetch {}:{}-{} (0-based)".format(fasta_transform_params.input_region.seq_id, last_pos - offset, fasta_transform_params.input_region.end - offset))
                partial_seq = fasta_file.fetch(fasta_transform_params.input_region.seq_id, last_pos - offset, fasta_transform_params.input_region.end - offset)
                if len(partial_seq) < 50:
                    LOG.debug("Fasta Fetch = {}".format(partial_seq))
                else:
                    LOG.debug("Fasta Fetch = {} ... {}".format(partial_seq[:25], partial_seq[-25:]))
                new_sequence.write(partial_seq)
                #LOG.debug("index of '>' is {}".format(new_sequence.getvalue().find('>')))
                new_sequence_len += (fasta_transform_params.input_region.end - last_pos)
        else:
            # No indels in the region: the output is the source region as-is.
            LOG.debug("NO INDELS FOUND IN REGION")
            LOG.debug("Fetching ONLY bit of sequence")
            LOG.debug('start={}'.format(start))
            LOG.debug('offset={}'.format(offset))
            LOG.debug('fasta_transform_params.input_region.end={}'.format(fasta_transform_params.input_region.end))
            LOG.debug("Fasta Fetch {}:{}-{} (0-based)".format(fasta_transform_params.input_region.seq_id, start - offset, fasta_transform_params.input_region.end - offset))
            partial_seq = fasta_file.fetch(fasta_transform_params.input_region.seq_id, start - offset, fasta_transform_params.input_region.end - offset)
            #if len(partial_seq) < 50:
            #    LOG.debug("Fasta Fetch = {}".format(partial_seq))
            #else:
            #    LOG.debug("Fasta Fetch = {} ... {}".format(partial_seq[:25], partial_seq[-25:]))
            new_sequence.write(partial_seq)
            #LOG.debug("index of '>' is {}".format(new_sequence.getvalue().find('>')))
            new_sequence_len += len(partial_seq) #(fasta_transform_params.input_region.end - offset) - 1
        LOG.debug("the new_sequence_len={}".format(new_sequence_len))
        if fasta_transform_params.full_file:
            LOG.debug("FULL FILE")
            out_header = ">{} {}:{}-{} from|{}:{}-{}\n".format(fasta_transform_params.output_region.seq_id, fasta_transform_params.input_region.seq_id, new_start_pos+1, new_start_pos + new_sequence_len, fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start, fasta_transform_params.input_region.end)
        else:
            LOG.debug("NOT FULL FILE")
            out_header = ">{}:{}-{} from|{}:{}-{}\n".format(fasta_transform_params.output_region.seq_id, new_start_pos+1, new_start_pos + new_sequence_len, fasta_transform_params.input_region.seq_id, fasta_transform_params.input_region.start, fasta_transform_params.input_region.end)
        LOG.debug("WRITING HEADER: {}".format(out_header))
        fasta_out.write(out_header)
        #LOG.debug("index of '>' is {}".format(new_sequence.getvalue().find('>')))
        g2g_utils.write_sequence(new_sequence.getvalue(), fasta_out)
        fasta_out.close()
    except KeyboardInterrupt:
        raise exceptions.KeyboardInterruptError()
    except exceptions.G2GRegionError as le:
        # NOTE(review): exception .message is Python 2 only; on Python 3 these
        # handlers would raise AttributeError — confirm intended runtime.
        LOG.debug("Unable to parse location, {0}".format(le.message))
        raise le
    except exceptions.G2GValueError as e:
        LOG.debug("Unable to parse alocation, {0}".format(e.message))
        raise e
    except exceptions.G2GFastaError as e:
        LOG.debug("Unable to parse blocation, {0}".format(e.message))
        raise e
    except TypeError as e:
        g2g_utils._show_error()
        LOG.debug("Unable to parse clocation, {0}".format(e.message))
        raise e
    except Exception as e:
        g2g_utils._show_error()
        LOG.debug("Unable to parse dlocation, {0}".format(e.message))
        raise e
    LOG.info("Transforming complete for {}".format(fasta_transform_params.input_region))
    g2g_utils.delete_index_files(tmp_fasta)
    g2g_utils.delete_file(tmp_fasta)
    return fasta_transform_result
    #LOG.info("Execution complete: {0}".format(g2g_utils.format_time(start_time, time.time())))
def wrapper(args):
    """
    Thin adapter so a multiprocessing pool can invoke process_piece with an
    argument tuple. Useful for debugging.

    :param args: tuple of positional arguments for process_piece
    :return: whatever process_piece returns
    """
    result = process_piece(*args)
    return result
def prepare_fasta_transform(filename_fasta, filename_output):
    """
    Initialize fasta_transform variables.

    :param filename_fasta: name of the input Fasta file (unused here; kept
        for interface compatibility with callers)
    :param filename_output: desired output file name; must end with
        '.fa' or '.fa.gz'
    :return: the output file name with any trailing '.gz' stripped
    :raises exceptions.G2GValueError: if the extension is neither '.fa'
        nor '.fa.gz'
    """
    filename_output = g2g_utils.check_file(filename_output, 'w')
    new_filename_output = filename_output
    # let's figure out what our output name will be
    if new_filename_output.lower().endswith('.gz'):
        # strip off .gz
        new_filename_output = new_filename_output[:-3]
    # BUG FIX: validate the stripped name, not the original. Checking the
    # original rejected valid '*.fa.gz' names even though the error message
    # promises they are accepted.
    if not new_filename_output.lower().endswith('.fa'):
        raise exceptions.G2GValueError("Expecting output filename extension to be either '.fa.gz' or '.fa'")
    g2g_utils.delete_index_files(new_filename_output)
    return new_filename_output
def process(filename_fasta, filename_vci, regions, filename_output=None, bgzip=False, reverse=False, num_processes=None, also_patch=False):
    """
    Transform a Fasta file using the mappings in a VCI file, optionally
    patching SNPs first, fanning the work out across a process pool.

    :param filename_fasta: name of the input Fasta file
    :type filename_fasta: string
    :param filename_vci: name of the VCI file with the mappings
    :type filename_vci: string
    :param regions: regions to transform; falsy means the whole file
    :param filename_output: name of the output Fasta file
    :type filename_output: string
    :param bgzip: compress file in BGZIP format
    :type bgzip: boolean
    :param reverse: apply the VCI mappings in the reverse direction
    :type reverse: boolean
    :param num_processes: the number of processes to spawn
    :type num_processes: int
    :param also_patch: also run the SNP patch step before transforming
    :type also_patch: boolean
    :return: Nothing
    """
    start = time.time()
    filename_fasta = g2g_utils.check_file(filename_fasta)
    filename_vci = g2g_utils.check_file(filename_vci)
    # Default the worker count to the CPU count; clamp non-positive to 1.
    if not num_processes:
        num_processes = multiprocessing.cpu_count()
    else:
        if num_processes <= 0:
            num_processes = 1
    LOG.info("Input Fasta File: {0}".format(filename_fasta))
    LOG.info("Input VCI File: {0}".format(filename_vci))
    LOG.info("Processes: {0}".format(num_processes))
    dump_fasta = False
    temp_directory = g2g_utils.create_temp_dir('transform_', dir='.')
    LOG.debug("Temp directory: {}".format(temp_directory))
    try:
        # Work out the output destination (possibly a temp file when no
        # output name was supplied; its contents are dumped at the end).
        if filename_output:
            filename_output = g2g_utils.check_file(filename_output, 'w')
            if not regions:
                filename_output = prepare_fasta_transform(filename_fasta, filename_output)
                LOG.info("Output Fasta File: {0}".format(filename_output))
            else:
                if bgzip:
                    if filename_output.lower().endswith((".fa", ".fasta")):
                        LOG.info("Output Fasta File: {0}.gz".format(filename_output))
                    elif filename_output.lower().endswith(".gz"):
                        LOG.info("Output Fasta File: {0}".format(filename_output))
                        filename_output = filename_output[:-3]
                else:
                    LOG.info("Output Fasta File: {0}".format(filename_output))
        else:
            filename_output = g2g_utils.gen_file_name(extension="fa", append_time=False, output_dir=temp_directory)
            dump_fasta = True
            LOG.debug("Temporary fasta file: {}".format(filename_output))
        fasta_file = fasta.FastaFile(filename_fasta)
        vci_file = vci.VCIFile(filename_vci)
        if fasta_file.is_diploid() and vci_file.is_haploid():
            raise exceptions.G2GFastaError("Haploid VCI file and diploid Fasta file combination is not currently supported for transform")
        # full_file means whole sequences are transformed (no sub-regions).
        full_file = True
        if regions:
            full_file = False
            if len(regions) > 5:
                LOG.info("Regions: {} (showing 1st five)".format(", ".join(l for l in map(str, regions[:5]))))
            else:
                LOG.info("Regions: {}".format(", ".join(l for l in map(str, regions))))
        else:
            # No explicit regions: build one region per reference sequence.
            regions = []
            for chrom in fasta_file.references:
                regions.append(g2g.Region(chrom, 1, fasta_file.get_reference_length(chrom)))
        vci_file = vci.VCIFile(filename_vci)
        # Build one FastaTransformParams per work unit; a haploid fasta with
        # a diploid VCI gets two units per region (the _L and _R haplotypes).
        all_params = []
        for region in regions:
            params = FastaTransformParams()
            LOG.debug('region={}'.format(region))
            LOG.debug('region.original_base={}'.format(region.original_base))
            params.input_region = region
            params.input_file = filename_fasta
            params.temp_dir = temp_directory
            params.vci_file = filename_vci
            params.reverse = reverse
            params.offset = 0 if region.start <= 1 else region.start - 1
            params.patch = also_patch
            params.full_file = full_file
            if fasta_file.is_haploid() and vci_file.is_diploid():
                #LOG.error("*** Experimental ***")
                #LOG.error("*** HAPLOID FASTA and DIPLOID VCI ***")
                LOG.debug("Fasta file is haploid and VCI file is diploid")
                params.output_file = g2g_utils.gen_file_name(prefix=g2g_utils.location_to_filestring(region)+"_L", extension="fa", output_dir=temp_directory, append_time=False)
                if full_file:
                    params.output_region = g2g.Region(region.seq_id+"_L", region.start, region.end)
                    params.output_header = fasta.FastaHeader(region.seq_id+"_L", "{}:{}-{}".format(region.seq_id, region.start, region.end))
                else:
                    params.output_region = g2g.Region("{}_L".format(region.seq_id), region.start, region.end)
                    params.output_header = fasta.FastaHeader("{}_L".format(region.seq_id), "{}_L:{}-{}".format(region.seq_id, region.start, region.end))
                params.vci_query = "{}_L:{}-{}".format(region.seq_id, region.start, region.end)
                all_params.append(params)
                # Right haplotype: clone the params and swap _L for _R.
                params_r = copy.deepcopy(params)
                params_r.output_file = g2g_utils.gen_file_name(prefix=g2g_utils.location_to_filestring(region)+"_R", extension="fa", output_dir=temp_directory, append_time=False)
                if full_file:
                    params_r.output_region = g2g.Region(region.seq_id+"_R", region.start, region.end)
                    params_r.output_header = fasta.FastaHeader(region.seq_id+"_R", "{}:{}-{}".format(region.seq_id, region.start, region.end))
                else:
                    params_r.output_region = g2g.Region("{}_R".format(region.seq_id), region.start, region.end)
                    params_r.output_header = fasta.FastaHeader("{}_R".format(region.seq_id), "{}_R:{}-{}".format(region.seq_id, region.start, region.end))
                params_r.vci_query = "{}_R:{}-{}".format(region.seq_id, region.start, region.end)
                all_params.append(params_r)
            else:
                LOG.debug("VCI file and Fasta file are both: {}".format("HAPLOID" if vci_file.is_haploid() else "DIPLOID"))
                params.output_file = g2g_utils.gen_file_name(prefix=g2g_utils.location_to_filestring(region), extension="fa", output_dir=temp_directory, append_time=False)
                if full_file:
                    params.output_region = g2g.Region(region.seq_id, region.start, region.end)
                    params.output_header = fasta.FastaHeader(region.seq_id, "{}:{}-{}".format(region.seq_id, region.start, region.end))
                else:
                    params.output_region = g2g.Region(region.seq_id, region.start, region.end)
                    params.output_header = fasta.FastaHeader("{} {}:{}-{}".format(region.seq_id, region.seq_id, region.start, region.end), None)
                params.vci_query = "{}:{}-{}".format(region.seq_id, region.start, region.end)
                all_params.append(params)
        LOG.info("PARSING VCI....")
        # Parse the VCI once into the module-level global shared by workers.
        global VCI_FILE
        seq_ids = [g2g.parse_region(p.vci_query).seq_id for p in all_params]
        LOG.info(seq_ids)
        VCI_FILE = vci.VCIFile(filename_vci, seq_ids=seq_ids)
        VCI_FILE.parse(reverse=reverse)
        LOG.info("DONE PARSING!")
        for ap in all_params:
            LOG.debug(str(ap))
        # zip() yields 1-tuples here; wrapper() unpacks them for process_piece.
        args = zip(all_params)
        LOG.debug(args)
        pool = multiprocessing.Pool(num_processes)
        results = pool.map(wrapper, args)
        # parse results
        total_snp = 0
        total_ins = 0
        total_del = 0
        files = []
        for c in results:
            if c is not None:
                files.append(c.output_file)
                total_snp += c.snp_count
                total_ins += c.ins_count
                total_del += c.del_count
        LOG.info("Processed {0:,} SNPs total".format(total_snp))
        LOG.info("Processed {0:,} insertions total".format(total_ins))
        LOG.info("Processed {0:,} deletions total".format(total_del))
        LOG.debug('all temp files created, copy to master temp file and delete');
        g2g_utils.concatenate_files(files, filename_output, False)
        if dump_fasta:
            g2g_utils.dump_file_contents(filename_output)
        else:
            # move temp to final destination
            if bgzip:
                # remove the fai
                LOG.debug("removing the FAI index for {0}".format(filename_output))
                g2g_utils.delete_index_files(filename_output)
                LOG.info("Compressing and indexing...")
                if filename_output.lower().endswith((".fa", ".fasta")):
                    g2g_utils.bgzip_and_index_file(filename_output, "{0}.gz".format(filename_output), delete_original=True, file_format='fa')
                elif filename_output.lower().endswith(".gz"):
                    g2g_utils.bgzip_and_index_file(filename_output, filename_output, delete_original=True, file_format='fa')
    except KeyboardInterrupt:
        raise exceptions.KeyboardInterruptError()
    except Exception as e:
        raise exceptions.G2GError(str(e))
    finally:
        # clean up the temporary files
        g2g_utils.delete_dir(temp_directory)
        LOG.info("Transform complete: {0}".format(g2g_utils.format_time(start, time.time())))
| |
import collections
import json
from unittest import mock
import marshmallow_jsonapi
import pytest
from marshmallow_jsonapi import fields
from flask_jsonapi import api
from flask_jsonapi import filters_schema
from flask_jsonapi import resources
JSONAPI_HEADERS = {'content-type': 'application/vnd.api+json', 'accept': 'application/vnd.api+json'}
@pytest.fixture
def example_schema():
    """Schema fixture for the 'example' JSON:API resource type."""
    # Fixed local class-name typo: ExmapleSchema -> ExampleSchema.
    class ExampleSchema(marshmallow_jsonapi.Schema):
        id = fields.UUID(required=True)
        body = fields.Str()

        class Meta:
            type_ = 'example'
            self_view_many = 'example_list'
            self_view = 'example_detail'
            self_view_kwargs = {'example_id': '<id>'}
            strict = True

    return ExampleSchema
@pytest.fixture
def example_model():
    """Return a lightweight stand-in model with 'id' and 'body' fields."""
    return collections.namedtuple('ExampleModel', 'id body')
def test_integration_get_list(app, example_schema, example_model):
    """GET on a list endpoint serializes every item plus jsonapi/meta blocks."""
    class ExampleListView(resources.ResourceList):
        schema = example_schema

        def read_many(self, filters, pagination):
            return [
                example_model(id='f60717a3-7dc2-4f1a-bdf4-f2804c3127a4', body='heheh'),
                example_model(id='f60717a3-7dc2-4f1a-bdf4-f2804c3127a5', body='hihi'),
            ]

    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().get('/examples/', headers=JSONAPI_HEADERS)
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    expected = {
        'data': [
            {
                'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a4',
                'type': 'example',
                'attributes': {'body': 'heheh'},
            },
            {
                'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a5',
                'type': 'example',
                'attributes': {'body': 'hihi'},
            },
        ],
        'jsonapi': {'version': '1.0'},
        'meta': {'count': 2},
    }
    assert payload == expected
def test_integration_get_list_with_pagination(app, example_schema, example_model):
    """Paginated GET adds self/first/previous/next/last links from get_count."""
    class ExampleListView(resources.ResourceList):
        schema = example_schema

        def read_many(self, filters, pagination):
            return [
                example_model(id='f60717a3-7dc2-4f1a-bdf4-f2804c3127a4', body='heheh'),
                example_model(id='f60717a3-7dc2-4f1a-bdf4-f2804c3127a5', body='hihi'),
            ]

        def get_count(self, filters):
            return 5

    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().get(
        '/examples/?page[size]=2&page[number]=1',
        headers=JSONAPI_HEADERS,
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    expected = {
        'data': [
            {
                'type': 'example',
                'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a4',
                'attributes': {'body': 'heheh'},
            },
            {
                'type': 'example',
                'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a5',
                'attributes': {'body': 'hihi'},
            },
        ],
        'links': {
            'self': 'http://localhost/examples/?page[size]=2&page[number]=1',
            'first': 'http://localhost/examples/?page[size]=2&page[number]=1',
            'previous': None,
            'next': 'http://localhost/examples/?page[size]=2&page[number]=2',
            'last': 'http://localhost/examples/?page[size]=2&page[number]=3',
        },
        'meta': {'count': 2},
        'jsonapi': {'version': '1.0'},
    }
    assert payload == expected
def test_integration_bad_accept_header(app, example_schema, example_model):
    """Requests without the JSON:API accept header are rejected with 406."""
    class ExampleListView(resources.ResourceList):
        schema = example_schema

    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().get('/examples/', headers={'accept': 'text/html'})
    assert resp.status_code == 406
    expected = {
        'errors': [{
            'detail': 'Accept header must be application/vnd.api+json',
            'source': '',
            'status': 406,
            'title': 'InvalidRequestHeader',
        }],
        'jsonapi': {'version': '1.0'},
    }
    assert json.loads(resp.data.decode()) == expected
def test_integration_bad_content_type_header(app, example_schema, example_model):
    """POST without the JSON:API content-type header is rejected with 415."""
    class ExampleListView(resources.ResourceList):
        schema = example_schema

    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().post('/examples/', headers={'accept': 'application/vnd.api+json'})
    assert resp.status_code == 415
    expected = {
        'errors': [{
            'detail': 'Content-Type header must be application/vnd.api+json',
            'source': '',
            'status': 415,
            'title': 'InvalidRequestHeader',
        }],
        'jsonapi': {'version': '1.0'},
    }
    assert json.loads(resp.data.decode()) == expected
def test_integration_get_filtered_list(app, example_schema, example_model):
    """Filter query params are parsed, renamed and type-coerced per schema."""
    class ExampleFiltersSchema(filters_schema.FilterSchema):
        basic = filters_schema.FilterField()
        listed = filters_schema.ListFilterField()
        dumb_name = filters_schema.FilterField(attribute='renamed')
        integer = filters_schema.FilterField(type_=fields.Int)
        skipped_filter = filters_schema.FilterField()

    class ExampleListView(resources.ResourceList):
        schema = example_schema
        filter_schema = ExampleFiltersSchema()
        applied_filters = {}

        def read_many(self, filters, pagination):
            self.applied_filters.update(filters)
            return []

    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().get(
        '/examples/?filter[basic]=text&filter[listed]=first,second&filter[dumb-name]=another&filter[integer]=3',
        headers=JSONAPI_HEADERS,
    )
    assert resp.status_code == 200
    assert ExampleListView.applied_filters == {
        'basic': 'text',
        'listed': ['first', 'second'],
        'renamed': 'another',
        'integer': 3,
    }
def test_integration_pagination(app, example_schema):
    """page[size]/page[number] query params reach read_many as pagination."""
    class ExampleListView(resources.ResourceList):
        schema = example_schema
        applied_pagination = {}

        def read_many(self, filters, pagination):
            self.applied_pagination.update(pagination)
            return []

        def get_count(self, filters):
            return 0

    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().get(
        '/examples/?page[size]=100&page[number]=50',
        headers=JSONAPI_HEADERS,
    )
    assert resp.status_code == 200
    assert ExampleListView.applied_pagination == {
        'size': 100,
        'number': 50,
    }
def test_integration_create_resource(app, example_schema, example_model):
    """POST with a valid document returns the created resource serialized."""
    class ExampleListView(resources.ResourceList):
        schema = example_schema

        def create(self, *args, **kwargs):
            return example_model(id='f60717a3-7dc2-4f1a-bdf4-f2804c3127a4', body='Nice body.')

    request_document = {
        'data': {
            'type': 'example',
            'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a4',
            'attributes': {'body': "Nice body."},
        }
    }
    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().post(
        '/examples/',
        headers=JSONAPI_HEADERS,
        data=json.dumps(request_document),
    )
    expected = {
        "data": {
            "type": "example",
            "id": "f60717a3-7dc2-4f1a-bdf4-f2804c3127a4",
            "attributes": {"body": "Nice body."},
        },
        "jsonapi": {"version": "1.0"},
    }
    assert json.loads(resp.data.decode()) == expected
def test_integration_create_resource_invalid_input(app, example_schema, example_model):
    """POST missing required attributes yields one error per missing field."""
    class TestSchema(marshmallow_jsonapi.Schema):
        id = fields.UUID()
        f1 = fields.Str(required=True)
        f2 = fields.Str(required=True)

        class Meta:
            type_ = 'test'
            strict = True

    class ExampleListView(resources.ResourceList):
        schema = TestSchema

        def create(self, *args, **kwargs):
            return example_model(id='f60717a3-7dc2-4f1a-bdf4-f2804c3127a4', body='Nice body.')

    json_api = api.Api(app)
    json_api.route(ExampleListView, 'example_list', '/examples/')
    resp = app.test_client().post(
        '/examples/',
        headers=JSONAPI_HEADERS,
        data=json.dumps({'data': {'type': 'test'}}),
    )
    payload = json.loads(resp.data.decode())
    assert payload == {
        'errors': mock.ANY,
        "jsonapi": {"version": "1.0"},
    }
    # Stable ordering by pointer so the comparison is deterministic.
    ordered_errors = sorted(payload['errors'], key=lambda err: err['source']['pointer'])
    assert list(ordered_errors) == [
        {
            'detail': 'Missing data for required field.',
            'source': {'pointer': '/data/attributes/f1'},
        },
        {
            'detail': 'Missing data for required field.',
            'source': {'pointer': '/data/attributes/f2'},
        },
    ]
def test_integration_get(app, example_schema, example_model):
    """GET on a detail endpoint serializes the single resource."""
    class ExampleDetailView(resources.ResourceDetail):
        schema = example_schema

        def read(self, id):
            return example_model(id=id, body='Gwynbelidd')

    json_api = api.Api(app)
    json_api.route(ExampleDetailView, 'example_detail', '/examples/<id>/')
    resp = app.test_client().get(
        '/examples/f60717a3-7dc2-4f1a-bdf4-f2804c3127a4/',
        headers=JSONAPI_HEADERS,
    )
    payload = json.loads(resp.data.decode())
    assert resp.status_code == 200
    expected = {
        'data': {
            'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a4',
            'type': 'example',
            'attributes': {'body': 'Gwynbelidd'},
        },
        'jsonapi': {'version': '1.0'},
    }
    assert payload == expected
def test_integration_delete(app, example_schema, example_model):
    """DELETE invokes destroy() with the id and returns an empty 204."""
    class ExampleDetailView(resources.ResourceDetail):
        schema = example_schema
        deleted_ids = []

        def destroy(self, id):
            self.deleted_ids.append(id)

    json_api = api.Api(app)
    json_api.route(ExampleDetailView, 'example_detail', '/examples/<id>/')
    resp = app.test_client().delete(
        '/examples/f60717a3-7dc2-4f1a-bdf4-f2804c3127a4/',
        headers=JSONAPI_HEADERS,
    )
    assert resp.status_code == 204
    assert resp.data == b''
    assert ExampleDetailView.deleted_ids == ['f60717a3-7dc2-4f1a-bdf4-f2804c3127a4']
def test_integration_patch(app, example_schema, example_model):
    """PATCH with a valid payload returns the updated object."""
    class ExampleDetailView(resources.ResourceDetail):
        schema = example_schema

        def update(self, id, data):
            data.pop('id')
            return example_model(id=id, **data)

    request_body = json.dumps({
        'data': {
            'type': 'example',
            'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a4',
            'attributes': {
                'body': "Nice body.",
            }
        }
    })
    json_api = api.Api(app)
    json_api.route(ExampleDetailView, 'example_list', '/examples/<id>/')

    response = app.test_client().patch(
        '/examples/f60717a3-7dc2-4f1a-bdf4-f2804c3127a4/',
        headers=JSONAPI_HEADERS,
        data=request_body,
    )

    expected = {
        "data": {
            "type": "example",
            "id": "f60717a3-7dc2-4f1a-bdf4-f2804c3127a4",
            "attributes": {
                "body": "Nice body."
            }
        },
        "jsonapi": {
            "version": "1.0"
        }
    }
    assert json.loads(response.data.decode()) == expected
def test_integration_patch_with_empty_response(app, example_schema, example_model):
    """PATCH responds 204 No Content when update() returns nothing."""
    class ExampleDetailView(resources.ResourceDetail):
        schema = example_schema

        def update(self, id, data):
            pass

    request_body = json.dumps({
        'data': {
            'type': 'example',
            'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a4',
            'attributes': {
                'body': "Nice body.",
            }
        }
    })
    json_api = api.Api(app)
    json_api.route(ExampleDetailView, 'example_list', '/examples/<id>/')

    response = app.test_client().patch(
        '/examples/f60717a3-7dc2-4f1a-bdf4-f2804c3127a4/',
        headers=JSONAPI_HEADERS,
        data=request_body,
    )

    # An empty update result maps to 204 with an empty body.
    assert response.status_code == 204
    assert response.data == b''
def test_creating_view_with_dynamic_schema(app, example_schema, example_model):
    """The schema may be supplied at routing time through view_kwargs."""
    class ExampleDetailView(resources.ResourceDetail):
        def read(self, id):
            return example_model(id=id, body='Gwynbelidd')

    json_api = api.Api(app)
    json_api.route(
        ExampleDetailView, 'example_detail', '/examples/<id>/',
        view_kwargs={'schema': example_schema})

    response = app.test_client().get(
        '/examples/f60717a3-7dc2-4f1a-bdf4-f2804c3127a4/',
        headers=JSONAPI_HEADERS,
    )
    payload = json.loads(response.data.decode())

    assert payload == {
        'data': {
            'id': 'f60717a3-7dc2-4f1a-bdf4-f2804c3127a4',
            'type': 'example',
            'attributes': {
                'body': 'Gwynbelidd'
            }
        },
        'jsonapi': {
            'version': '1.0'
        }
    }
| |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
# pylint: disable=maybe-no-member
"""Test request import and updates."""
from nose.plugins import skip
from ggrc import models
from ggrc.converters import errors
from integration.ggrc import converters
class TestRequestImport(converters.TestCase):
    """Basic Request import tests.

    This test suite should test new Request imports and updates. The main focus
    of these tests is checking error messages for invalid state transitions.
    """

    def setUp(self):
        """ Set up for Request test cases """
        converters.TestCase.setUp(self)
        # Establish a logged-in session before any import request.
        self.client.get("/login")

    def _test_request_users(self, request, users):
        """ Test that all users have correct roles on specified Request"""
        # Accumulate failures so every user is checked before failing the
        # test as a whole.
        verification_errors = ""
        for user_name, expected_types in users.items():
            try:
                user = models.Person.query.filter_by(name=user_name).first()
                rel = models.Relationship.find_related(request, user)
                if expected_types:
                    # The user must be mapped with exactly these assignee types.
                    self.assertNotEqual(
                        rel,
                        None,
                        "User {} is not mapped to {}".format(user.email, request.slug)
                    )
                    self.assertIn("AssigneeType", rel.relationship_attrs)
                    self.assertEqual(
                        set(rel.relationship_attrs[
                            "AssigneeType"].attr_value.split(",")),
                        expected_types
                    )
                else:
                    # Empty expectation means the user must NOT be mapped.
                    self.assertEqual(
                        rel,
                        None,
                        "User {} is mapped to {}".format(user.email, request.slug)
                    )
            except AssertionError as error:
                verification_errors += "\n\nChecks for Users-Request mapping failed "\
                    "for user '{}' with:\n{}".format(user_name, str(error))
        self.assertEqual(verification_errors, "", verification_errors)

    def test_request_full_no_warnings(self):
        """ Test full request import with no warnings

        CSV sheet:
          https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
        """
        filename = "request_full_no_warnings.csv"
        response = self.import_file(filename)
        # A clean import produces no errors or warnings of any kind.
        messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
        for response_block in response:
            for message in messages:
                self.assertEqual(set(), set(response_block[message]))

        # Test first request line in the CSV file
        request_1 = models.Request.query.filter_by(slug="Request 1").first()
        users = {
            "user 1": {"Assignee"},
            "user 2": {"Assignee", "Requester"},
            "user 3": {"Requester", "Verifier"},
            "user 4": {"Verifier"},
            "user 5": {"Verifier"},
        }
        self._test_request_users(request_1, users)
        self.assertEqual(request_1.status, "Open")
        self.assertEqual(request_1.request_type, "documentation")

        # Test second request line in the CSV file
        request_2 = models.Request.query.filter_by(slug="Request 2").first()
        users = {
            "user 1": {"Assignee"},
            "user 2": {"Requester"},
            "user 3": {"Verifier"},
            "user 4": {},
            "user 5": {},
        }
        self._test_request_users(request_2, users)
        self.assertEqual(request_2.status, "In Progress")
        self.assertEqual(request_2.request_type, "interview")

    def test_request_import_states(self):
        """ Test Request state imports

        These tests are an intermediate part for zucchini release and will be
        updated in the next release.

        CSV sheet:
          https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=299569476
        """
        self.import_file("request_full_no_warnings.csv")
        response = self.import_file("request_update_intermediate.csv")

        message_types = (
            "block_errors",
            "block_warnings",
            "row_errors",
            "row_warnings"
        )
        # Only invalid state transitions are reported, as row warnings.
        messages = {
            "block_errors": set(),
            "block_warnings": set(),
            "row_errors": set(),
            "row_warnings": set([
                errors.REQUEST_INVALID_STATE.format(line=5),
                errors.REQUEST_INVALID_STATE.format(line=6),
                errors.REQUEST_INVALID_STATE.format(line=11),
                errors.REQUEST_INVALID_STATE.format(line=12),
            ]),
        }
        for message_type in message_types:
            # Message lists must be duplicate-free and match expectations.
            self.assertEqual(len(set(response[0][message_type])),
                             len(response[0][message_type]))
            self.assertEqual(set(response[0][message_type]), messages[message_type])

        requests = {r.slug: r for r in models.Request.query.all()}
        self.assertEqual(requests["Request 60"].status, "Open")
        self.assertEqual(requests["Request 61"].status, "In Progress")
        self.assertEqual(requests["Request 62"].status, "Finished")
        self.assertEqual(requests["Request 63"].status, "In Progress")
        self.assertEqual(requests["Request 64"].status, "In Progress")
        self.assertEqual(requests["Request 3"].status, "In Progress")
        self.assertEqual(requests["Request 4"].status, "In Progress")

    # Disabled test: kept for reference, currently skipped via nose.
    @skip.SkipTest
    def test_request_warnings_errors(self):
        """ Test full request import with warnings and errors

        CSV sheet:
          https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
        """
        self.import_file("request_full_no_warnings.csv")
        response = self.import_file("request_with_warnings_and_errors.csv")

        message_types = (
            "block_errors",
            "block_warnings",
            "row_errors",
            "row_warnings"
        )
        messages = {
            "block_errors": set([]),
            "block_warnings": set([
                errors.UNKNOWN_COLUMN.format(
                    line=2,
                    column_name="error description - non existing column will be "
                    "ignored"
                ),
                errors.UNKNOWN_COLUMN.format(
                    line=2,
                    column_name="actual error ""message"
                ),
            ]),
            "row_errors": set([
                errors.UNKNOWN_OBJECT.format(
                    line=18,
                    object_type="Audit",
                    slug="not existing"
                ),
                errors.DUPLICATE_VALUE_IN_CSV.format(
                    line_list="19, 21",
                    column_name="Code",
                    value="Request 22",
                    s="",
                    ignore_lines="21",
                ),
            ]),
            "row_warnings": set([
                errors.UNKNOWN_USER_WARNING.format(
                    line=14,
                    email="non_existing@a.com",
                ),
                errors.UNKNOWN_OBJECT.format(
                    line=14,
                    object_type="Project",
                    slug="proj-55"
                ),
            ]),
        }
        for message_type in message_types:
            self.assertEqual(len(set(response[0][message_type])),
                             len(response[0][message_type]))
            self.assertEqual(set(response[0][message_type]), messages[message_type])
| |
'''SLR parsing techniques for CFGs.'''
from collections import deque
import pprint
import copy
import cgi
import util.html
from core import ContextFreeGrammar, Terminal, Nonterminal, Marker, Epsilon, \
ProductionRule, PrimedNonterminal
import automaton
from table import ParseTableNormalForm
# End-of-input marker ('$') appended conceptually to every input string.
END_MARKER = Marker('$')
class Item(object):
    '''An LR(0) item.

    An item pairs a production rule with a dot position in
    [0, len(right_side)] marking how much of the right side has been
    recognized so far. Items are immutable and hashable.
    '''

    def __init__(self, production, dot_pos):
        if not isinstance(production, ProductionRule):
            raise TypeError('production is not an instance of ProductionRule')
        if not (0 <= dot_pos <= len(production.right_side)):
            raise ValueError('dot position not within bounds')
        self.production = production
        self.dot_pos = dot_pos

    def after_dot(self):
        '''Return the grammar symbol immediately after the dot, or None when
        the dot is at the end of the production.'''
        if self.dot_pos < len(self.production.right_side):
            return self.production.right_side[self.dot_pos]
        else:
            return None

    def complete(self):
        '''Tell whether the dot has reached the end of the production.'''
        return self.dot_pos == len(self.production.right_side)

    def dot_advanced(self):
        '''Return a new Item with the dot moved one symbol to the right.'''
        return Item(self.production, self.dot_pos + 1)

    def __str__(self):
        # Fix: materialize map() into a list so .insert() also works on
        # Python 3, where map() returns an iterator rather than a list.
        strs = list(map(str, self.production.right_side))
        if any(map(lambda x: len(x) > 1, strs)):
            sep = ' '
        else:
            sep = ''
        strs.insert(self.dot_pos, '.')
        return '%s -> %s' % (self.production.left_side, sep.join(strs))

    def __eq__(self, other):
        return isinstance(other, Item) and \
            self.dot_pos == other.dot_pos and \
            self.production == other.production

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; define it so that !=
        # is always consistent with ==.
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.production, self.dot_pos))

    def _html(self, tohtml, middot):
        # Fix: same list(map(...)) materialization as in __str__.
        strs = list(map(lambda x: tohtml(x), self.production.right_side))
        strs.insert(self.dot_pos, middot)
        return '%s → %s' % (tohtml(self.production.left_side), ''.join(strs))

    def html(self):
        return self._html(lambda x: x.html(), '·')

    def dot_html(self):
        return self._html(lambda x: x.dot_html(), '·')
def is_augmented(grammar):
    '''Check if a grammar's start symbol appears at most once on the left side
    of a production rule and never appears on the right side.'''
    assert isinstance(grammar, ContextFreeGrammar)
    # Fix: the original used len(filter(...)), which fails on Python 3 where
    # filter() returns an iterator. Generator expressions behave identically
    # on Python 2 and 3.
    start_rule_count = sum(
        1 for p in grammar.productions if p.left_side == grammar.start)
    start_on_right_side = any(
        grammar.start in p.right_side for p in grammar.productions)
    return start_rule_count <= 1 and not start_on_right_side
def augmented(grammar):
    '''Augment a grammar with a new start symbol, if necessary.

    If the grammar is already augmented it is returned unchanged; otherwise a
    fresh primed start symbol S' and the single rule S' -> S are added.
    '''
    if is_augmented(grammar):
        return grammar
    else:
        # Pick a primed name not already used by any nonterminal.
        S0 = PrimedNonterminal.next_unused(grammar.start.name, grammar.nonterminals)
        N = [S0] + list(grammar.nonterminals)
        T = grammar.terminals
        P = [ProductionRule(S0, [grammar.start])] + grammar.productions
        return ContextFreeGrammar(N, T, P, S0)
def closure(item, grammar):
    '''Compute the closure of a single item.

    Returns the list of items reachable from `item`: whenever an item's dot
    precedes a nonterminal A, a dot-at-start item is added for each
    production of A (each nonterminal expanded only once).
    '''
    assert isinstance(item, Item)
    assert isinstance(grammar, ContextFreeGrammar)
    result = [item]
    seen = set()
    i = 0
    # Worklist scan over `result`; new items are appended as discovered.
    while i < len(result):
        source_item = result[i]
        A = source_item.after_dot()
        if isinstance(A, Nonterminal) and A not in seen:
            # NOTE(review): this relies on filter() producing a list (Python
            # 2 semantics). Under Python 3 a lazy filter would be consumed by
            # extend() while `result` grows -- confirm before porting.
            result.extend(
                filter(lambda x: x not in result,
                       map(lambda x: Item(x, 0), grammar.productions_with_left_side(A))))
            seen.add(A)
        i += 1
    return result
def is_kernel_item(item, grammar):
    '''Tell if an item is a kernel item.'''
    if not isinstance(item, Item):
        return False
    # Kernel items are either past their first symbol or start-symbol items.
    return item.dot_pos > 0 or item.production.left_side == grammar.start
class Closure(object):
    '''A closure of a set of items.

    Represented by its kernel items plus the grammar; non-kernel (closure)
    items are recomputed on demand rather than stored.
    '''

    def __init__(self, kernel_items, grammar):
        '''Construct with a set of kernel items and the grammar to which they
        belong.'''
        for ki in kernel_items:
            assert is_kernel_item(ki, grammar)
        self.kernel_items = kernel_items
        self.grammar = grammar

    def closure_nonterminals(self):
        '''Return the nonterminals on which the closure has transitions to non-
        empty closures.'''
        result = []
        # Get the nonterminals to the right of a dot in the kernel items.
        for item in self.kernel_items:
            X = item.after_dot()
            if isinstance(X, Nonterminal) and X not in result:
                result.append(X)
        # For every nonterminal found, include any nonterminals which appear
        # at the beginning of rules with those nonterminals on the left side.
        i = 0
        while i < len(result):
            for p in self.grammar.productions_with_left_side(result[i]):
                if len(p.right_side) > 0:
                    X = p.right_side[0]
                    if isinstance(X, Nonterminal) and X not in result:
                        result.append(X)
            i += 1
        return result

    def closure_items(self):
        '''Enumerate all of the non-kernel or closure items.'''
        return [Item(p, 0) for A in self.closure_nonterminals() for p in self.grammar.productions_with_left_side(A)]

    def items(self):
        '''Enumerate all of the items, kernel and non-kernel.'''
        return self.kernel_items + self.closure_items()

    def goto_kernel_items(self, X):
        '''Enumerate the kernel items to which the closure transitions on a
        certain symbol.'''
        return [item.dot_advanced() for item in self.items() if item.after_dot() == X]

    def goto(self, X):
        '''Return the closure to which this closure transitions on a certain
        symbol.'''
        return Closure(self.goto_kernel_items(X), self.grammar)

    def goto_symbols(self):
        '''Enumerate the symbols on which this closure has transitions to non-
        empty closures.'''
        seen = []
        for item in self.items():
            X = item.after_dot()
            if X is not None and X not in seen:
                seen.append(X)
        return seen

    def transitions(self):
        '''Enumerate all of the transitions leading out of this closure to non-
        empty closures in the form of pairs (X, Ii), where X is a grammar
        symbol and Ii is the closure to which the transition leads.'''
        return [(X, self.goto(X)) for X in self.goto_symbols()]

    def __nonzero__(self):
        '''The closure evaluates to True if and only if it is non-empty.'''
        # Python 2 truth protocol; a Python 3 port would need __bool__.
        return bool(self.kernel_items)

    def __eq__(self, other):
        # Kernel items fully determine the closure, so compare them as sets.
        return isinstance(other, Closure) and \
            set(self.kernel_items) == set(other.kernel_items)

    def __str__(self):
        return '\n'.join(map(str, self.items()))

    def html(self):
        lines = ['<tr><td>%s</td></tr>' % i.html() for i in self.items()]
        return '''\
<table>
%s
</table>
''' % '\n '.join(lines)
class Automaton(automaton.Automaton):
    '''An SLR automaton: the canonical LR(0) collection of item-set closures
    together with the goto transitions between them.'''

    def __init__(self, grammar):
        assert isinstance(grammar, ContextFreeGrammar)
        # Bug fix: super() must be given the class being defined, not its
        # base. `super(automaton.Automaton, self)` resolves the MRO *past*
        # the base class, so automaton.Automaton.__init__ was never run.
        super(Automaton, self).__init__()
        # Construct initial closure item from the augmented grammar's unique
        # start production S' -> S.
        self._grammar = Gp = augmented(grammar)
        [p0] = Gp.productions_with_left_side(Gp.start)
        initial_closure = Closure([Item(p0, 0)], Gp)
        # Add initial state
        self._states = [initial_closure]
        self.add_state(0)
        # Add the remaining states via a worklist scan over `_states`; new
        # closures are appended as they are discovered.
        i = 0
        while i < len(self._states):
            for X, I in self._states[i].transitions():
                index = self._get_state_index(I)
                if index is None:
                    index = len(self._states)
                    self._states.append(I)
                self.add_transition(i, X, index)
            i += 1

    def _get_state_index(self, closure):
        '''Index of an existing state equal to `closure`, or None.'''
        try:
            return self._states.index(closure)
        except ValueError:
            return None

    def augmented_grammar(self):
        '''Return the augmented grammar the automaton was built from.'''
        return self._grammar

    def num_states(self):
        return len(self._states)

    def closure_states(self):
        '''Iterate over (index, Closure) pairs for all states.'''
        return enumerate(self._states)

    def get_state(self, i):
        return self._states[i]

    def state_dot_label(self, s):
        '''Plain-text label for state s in Graphviz output.'''
        return str(self._states[s])

    def state_dot_html_label(self, s):
        '''HTML-like Graphviz label listing all items of state s.'''
        return '''\
<table>
<tr><td><b>%s</b></td></tr>
%s
</table>
''' % (s, '\n '.join(['<tr><td>%s</td></tr>' % item.dot_html() for item in self._states[s].items()]))

    def dot_str(self):
        return super(Automaton, self).dot_str(shape='box')

    def dot_html(self):
        return super(Automaton, self).dot_html(shape='none')
class FirstSetTable(object):
    '''The first set table used for the SLR automaton construction algorithm.

    For each nonterminal A, table[A] is a two-element list
    [first_terminals, nullable]: the terminals that can begin a string
    derived from A, and whether A can derive the empty string.
    '''

    def __init__(self, grammar):
        self.grammar = grammar
        self._compute()

    def terminals(self, A):
        '''Return the set of terminals in FIRST(A).'''
        return self.table[A][0]

    def nullable(self, A):
        '''Tell whether A can derive the empty string.'''
        return self.table[A][1]

    def string_first(self, s):
        '''Compute (FIRST(s), nullable) for a sequence of grammar symbols s.'''
        result = set()
        for X in s:
            if X in self.table:
                # X is a nonterminal: absorb its first set; continue only if
                # X is nullable.
                terminals, empty = self.table[X]
                result |= terminals
                if not empty:
                    return result, False
            else:
                # X is a terminal: it begins every derivation from here on.
                result.add(X)
                return result, False
        # All symbols nullable (or s empty): the whole string is nullable.
        return result, True

    def _compute(self):
        # Fixed-point iteration. `rules` shrinks each pass to the rule
        # suffixes that may still contribute new information.
        self.table = {A : [set(), False] for A in self.grammar.nonterminals}
        pass_number = 1
        changed = True
        rules = list(self.grammar.productions)
        while changed:
            old_table = copy.deepcopy(self.table)
            changed = False
            next_rules = []
            for p in rules:
                A = p.left_side
                for i, Xi in enumerate(p.right_side):
                    if Xi in self.grammar.nonterminals:
                        self.table[A][0] |= self.table[Xi][0]
                        if not self.table[Xi][1]:
                            # Not (yet known to be) nullable: revisit this
                            # suffix on the next pass.
                            next_rules.append(ProductionRule(A, p.right_side[i:]))
                            break
                    else:
                        # A terminal ends the scan of this right side.
                        self.table[A][0].add(Xi)
                        break
                else:
                    # for/else: the entire right side was nullable.
                    self.table[A][1] = True
            if old_table != self.table:
                changed = True
            rules = next_rules
            pass_number += 1

    def html(self):
        return '''\
<table>
%s
</table>
''' % '\n '.join(['<tr><th>%s</th><td>%s</td></tr>' % \
            (X.html(), util.html.html_set(sorted(T) + ([Epsilon()] if e else []))) \
            for X, (T, e) in sorted(self.table.items())])
class FollowSetTable(object):
    '''The follow set table used for the SLR automaton construction and table
    construction algorithms.'''

    def __init__(self, first_sets, automaton):
        self.first_sets = first_sets
        self.automaton = automaton
        self.grammar = automaton.augmented_grammar()
        self._compute()

    def terminals(self, A):
        '''Return FOLLOW(A): the terminals that may appear immediately after
        nonterminal A in some sentential form.'''
        return self.table[A]

    def _compute(self):
        # Standard FOLLOW fixed point:
        # - $ is in FOLLOW(start);
        # - for A -> alpha B beta, FIRST(beta) goes into FOLLOW(B);
        # - if beta is nullable (or empty), FOLLOW(A) goes into FOLLOW(B).
        self.table = {A : set() for A in self.grammar.nonterminals}
        self.table[self.grammar.start].add(END_MARKER)
        changed = True
        while changed:
            changed = False
            old_table = copy.deepcopy(self.table)
            for p in self.grammar.productions:
                A = p.left_side
                for i, B in enumerate(p.right_side):
                    if isinstance(B, Nonterminal):
                        Bfirst, Bempty = self.first_sets.string_first(p.right_side[i+1:])
                        self.table[B] |= Bfirst
                        if Bempty:
                            self.table[B] |= self.table[A]
            if old_table != self.table:
                changed = True

    def html(self):
        return '''\
<table>
%s
</table>
''' % '\n '.join(['<tr><th>%s</th><td>%s</td></tr>' % \
            (A.html(), util.html.html_set(sorted(T))) \
            for A, T in sorted(self.table.items())])
class ParsingTable(object):
    '''An SLR parsing table which allows multi-valued entries instead of
    treating shift-reduce and reduce-reduce conflicts as errors.'''

    # Action tags stored as the first element of every ACTION tuple.
    SHIFT = 'shift'
    REDUCE = 'reduce'
    ACCEPT = 'accept'

    def __init__(self, *args):
        # Overloaded constructor: build from a grammar (1 argument) or from
        # pre-computed ACTION and GOTO tables (2 arguments).
        if len(args) == 1:
            self._init_grammar(*args)
        elif len(args) == 2:
            self._init_table(*args)
        else:
            raise TypeError('ParsingTable takes 1 or 2 arguments')

    def _init_grammar(self, grammar):
        self._grammar = grammar
        self._automaton = M = Automaton(grammar)
        self._compute_follow_sets()
        # ACTION table
        # State i is constructed from Ii. The parsing actions for state i are
        # determined as follows:
        # - If [A -> alpha . a beta] is in Ii and GOTO(Ii, a) = Ij, then set
        #   ACTION[i, a] to "shift j." Here a must be a terminal.
        # - If [A -> alpha .] is in Ii, then set ACTION[i, a] to
        #   "reduce A -> alpha" for all a in FOLLOW(A); here A may not be S'.
        # - If [S' -> S .] is in Ii, then set ACTION[i, $] to "accept."
        # GOTO table
        # If GOTO(Ii, A) = Ij, then GOTO[i, A] = j.
        self._action = [{} for i in range(M.num_states())]
        self._goto = [{} for i in range(M.num_states())]
        # Take care of the GOTO table and all of the shift actions.
        for i, X, j in M.transitions:
            if isinstance(X, Terminal):
                self._add_action(i, X, (ParsingTable.SHIFT, j))
            else:
                self._goto[i][X] = j
        for i, Ii in M.closure_states():
            for item in Ii.items():
                if item.complete():
                    A = item.production.left_side
                    if A == M.augmented_grammar().start:
                        # Add accept action
                        self._add_action(i, END_MARKER, (ParsingTable.ACCEPT,))
                    else:
                        # Add reduce actions
                        for a in self.follow(A):
                            self._add_action(i, a, (ParsingTable.REDUCE, M.augmented_grammar().productions.index(item.production)))

    def _init_table(self, action, goto):
        # Build from explicit tables. With no grammar available the
        # terminal/nonterminal alphabets are recovered from the table keys.
        assert len(action) == len(goto)
        self._action = [{a : list(actions) for a, actions in row.iteritems()} for row in action]
        self._goto = [{A : state for A, state in row.iteritems()} for row in goto]
        self._grammar = None
        self._terminals = list(set([a for row in self._action for a, actions in row.iteritems()]))
        self._nonterminals = list(set([A for row in self._goto for A, state in row.iteritems()]))

    def action(self, i, a):
        '''The ACTION function takes as arguments a state i and a terminal a
        (or $, the input endmarker). The value of ACTION[i, a] can have one of
        four forms:
        1. Shift j, where j is a state. The action taken by the parser
           effectively shifts input a to the stack, but uses state j to
           represent a.
        2. Reduce A -> beta. The action of the parser effectively reduces beta
           on the top of the stack to head A.
        3. Accept. The parser accepts the input and finishes parsing.
        4. Error. The parser discovers an error in its input and takes some
           corrective action.'''
        return self._action[i].get(a, [])

    def goto(self, i, A):
        '''We extend the GOTO function, defined on sets of items, to states:
        if GOTO[Ii, A] = Ij, then GOTO also maps a state i and a nonterminal A
        to state j.'''
        return self._goto[i].get(A, None)

    def follow(self, A):
        '''To compute FOLLOW(A) for all nonterminals A, apply the following
        rules until nothing can be added to any FOLLOW set.
        - Place $ in FOLLOW(S), where S is the start symbol.
        - If there is a production A -> alpha B beta, then everything in
          FIRST(beta) except epsilon is in FOLLOW(B).
        - If there is a production A -> alpha B, or a production
          A -> alpha B beta, where FIRST(beta) contains epsilon, then
          everything in FOLLOW(A) is in FOLLOW(B).'''
        return self._follow_sets.terminals(A)

    def original_grammar(self):
        '''Return the original grammar which was given.'''
        return self._grammar

    def augmented_grammar(self):
        '''Return the augmented grammar to which the production rule numbers
        correspond, if a grammar was given.'''
        return self._automaton.augmented_grammar()

    def first_sets(self):
        '''Return the first set table.'''
        return self._first_sets

    def follow_sets(self):
        '''Return the follow set table.'''
        return self._follow_sets

    def _compute_first_sets(self, G):
        self._first_sets = FirstSetTable(G)

    def _compute_follow_sets(self):
        M = self._automaton
        G = M.augmented_grammar()
        self._compute_first_sets(G)
        self._follow_sets = FollowSetTable(self._first_sets, M)

    def _add_to_follow_set(self, A, terminals):
        # NOTE(review): appears unused, and indexes _follow_sets (a
        # FollowSetTable) with []= which that class does not define --
        # likely dead code from an earlier dict-based implementation.
        if A not in self._follow_sets: self._follow_sets[A] = terminals
        else: self._follow_sets[A] |= terminals

    def _add_action(self, i, X, action):
        # Multi-valued entry: conflicts accumulate instead of erroring.
        row = self._action[i]
        if X not in row: row[X] = []
        row[X].append(action)

    def _action_set_html(self, i, a):
        actions = self.action(i, a)
        if actions:
            #return '<table>%s</table>' % \
            #    (''.join(['<tr><td>%s</td></tr>' % (self._action_html(action),) for action in self.action(i, a)]))
            return ','.join(self._action_html(action) for action in self.action(i, a))
        return ''

    def _action_html(self, action):
        if action[0] == ParsingTable.SHIFT: return 'sh%d' % action[1]
        elif action[0] == ParsingTable.REDUCE: return 're%s' % action[1]
        elif action[0] == ParsingTable.ACCEPT: return 'acc'

    def _goto_html(self, i, A):
        goto = self.goto(i, A)
        return goto if goto is not None else ''

    def terminals(self):
        # When built from raw tables there is no grammar to consult.
        if self._grammar is None:
            return self._terminals
        return sorted(self._grammar.terminals) + [END_MARKER]

    def nonterminals(self):
        if self._grammar is None:
            return self._nonterminals
        return sorted(self._grammar.nonterminals)

    def _closure_states(self):
        if self._grammar is None:
            return [(i, None) for i in range(len(self._action))]
        return self._automaton.closure_states()

    def __str__(self):
        return 'ACTION =\n%s\nGOTO =\n%s' % tuple(map(pprint.pformat, (self._action, self._goto)))

    def html(self):
        return '''\
<table>
<tr><th rowspan="2">STATE</th><th colspan="%d">ACTION</th><th colspan="%d">GOTO</th></tr>
<tr>%s</tr>
%s
</table>
''' % (len(self.terminals()), len(self.nonterminals()),
            ''.join(['<th>%s</th>' % X.html() for X in self.terminals() + self.nonterminals()]),
            '\n '.join(['<tr><th>%d</th>%s</tr>' % \
                (i, ''.join(['<td>%s</td>' % s for s in \
                    [self._action_set_html(i, a) for a in self.terminals()] + \
                    [self._goto_html(i, A) for A in self.nonterminals()]])) \
                for i, Ii in self._closure_states()]))

    def to_normal_form(self):
        '''Convert to a ParseTableNormalForm, merging shifts/gotos and
        collecting reductions and accepts.'''
        result = ParseTableNormalForm()
        for s, row in enumerate(self._action):
            for a, cell in row.iteritems():
                for subcell in cell:
                    action = subcell[0]
                    if action == self.SHIFT:
                        result.set_gotoshift(s, a, subcell[1])
                    elif action == self.REDUCE:
                        result.add_reduction(s, a, subcell[1])
                    elif action == self.ACCEPT:
                        result.set_accept(s, a)
        for s, row in enumerate(self._goto):
            for A, t in row.iteritems():
                result.set_gotoshift(s, A, t)
        return result
| |
import numpy as np
import tensorflow as tf
import h5py
from sklearn.preprocessing import OneHotEncoder
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import scipy.io
# Functions for initializing neural nets parameters
def weight_variable(shape, var_name):
    """Create a float64 weight Variable initialized from a truncated
    normal distribution with standard deviation 0.1."""
    return tf.Variable(
        tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64),
        name=var_name)
def bias_variable(shape, var_name):
    """Create a float64 bias Variable filled with the constant 0.1."""
    return tf.Variable(
        tf.constant(0.1, shape=shape, dtype=tf.float64),
        name=var_name)
def conv2d(x, W):
    """2-D convolution of x with filter W, unit strides, no padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def batch_nm(x, eps=1e-5):
    """Normalize x to zero mean and unit variance over the batch axis
    (no learned scale/offset)."""
    mean, variance = tf.nn.moments(x, [0])
    return tf.nn.batch_normalization(x, mean, variance, None, None, eps)
# Download data from .mat file into numpy array
print('==> Experiment 8f - with dropout')
filepath = '/scratch/ttanpras/exp8a_d7_1s.mat'
print('==> Loading data from {}'.format(filepath))
f = h5py.File(filepath)
data_train = np.array(f.get('trainingFeatures'))
data_val = np.array(f.get('validationFeatures'))
del f
print('==> Data sizes:',data_train.shape, data_val.shape)

# Transform labels into one-hot encoding form
enc = OneHotEncoder(n_values = 71)

'''
NN config parameters
'''
sub_window_size = 32
num_features = 169*sub_window_size
num_frames = 32
hidden_layer_size = 64
num_bits = 64
num_classes = 71

print("Number of features:", num_features)
print("Number of songs:",num_classes)

# Reshape input features: one row per window (169 bins x 32 frames).
X_train = np.reshape(data_train,(-1, num_features))
X_val = np.reshape(data_val,(-1, num_features))
print("Input sizes:", X_train.shape, X_val.shape)

y_train = []
y_val = []

# Add Labels
# NOTE(review): assumes rows are ordered by class with an equal number of
# samples per class -- confirm against how the .mat file was generated.
for label in range(num_classes):
    for sampleCount in range(X_train.shape[0]//num_classes):
        y_train.append([label])
    for sampleCount in range(X_val.shape[0]//num_classes):
        y_val.append([label])

# Append labels as the last column so features and labels shuffle together.
X_train = np.concatenate((X_train, y_train), axis=1)
X_val = np.concatenate((X_val, y_val), axis=1)

# Shuffle
np.random.shuffle(X_train)
np.random.shuffle(X_val)

# Separate coefficients and labels
y_train = X_train[:, -1].reshape(-1, 1)
X_train = X_train[:, :-1]
y_val = X_val[:, -1].reshape(-1, 1)
X_val = X_val[:, :-1]
print('==> Data sizes:',X_train.shape, y_train.shape,X_val.shape, y_val.shape)

# One-hot encode the integer labels as dense arrays.
y_train = enc.fit_transform(y_train.copy()).astype(int).toarray()
y_val = enc.fit_transform(y_val.copy()).astype(int).toarray()

# Epoch indices and error curves collected for the final plot.
plotx = []
ploty_train = []
ploty_val = []
# Set-up NN layers
x = tf.placeholder(tf.float64, [None, num_features])
W1 = weight_variable([num_features, hidden_layer_size], "W1")
b1 = bias_variable([hidden_layer_size], "b1")

# Op* placeholders mirror each weight/bias; the training loop rebinds these
# Python names to remember the best-so-far parameters for early stopping.
OpW1 = tf.placeholder(tf.float64, [num_features, hidden_layer_size])
Opb1 = tf.placeholder(tf.float64, [hidden_layer_size])

# Hidden layer activation function: ReLU
h1 = tf.nn.relu(tf.matmul(x, W1) + b1)

W2 = weight_variable([hidden_layer_size, num_bits], "W2")
b2 = bias_variable([num_bits], "b2")
OpW2 = tf.placeholder(tf.float64, [hidden_layer_size, num_bits])
Opb2 = tf.placeholder(tf.float64, [num_bits])

# Pre-activation value for bit representation
h = tf.matmul(h1, W2) + b2
h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)

# dropout
keep_prob = tf.placeholder(tf.float64)
h2_drop = tf.nn.dropout(h2, keep_prob)

W3 = weight_variable([num_bits, num_classes], "W3")
b3 = bias_variable([num_classes], "b3")
OpW3 = tf.placeholder(tf.float64, [num_bits, num_classes])
Opb3 = tf.placeholder(tf.float64, [num_classes])

# Softmax layer (Output), dtype = float64
y = tf.matmul(h2_drop, W3) + b3

# NN desired value (labels)
y_ = tf.placeholder(tf.float64, [None, num_classes])

# Loss function: mean softmax cross-entropy over the batch.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.InteractiveSession()
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
sess.run(tf.initialize_all_variables())
# Training
numTrainingVec = len(X_train)
batchSize = 500
numEpochs = 1000
bestValErr = 10000
bestValEpoch = 0

startTime = time.time()
for epoch in range(numEpochs):
    for i in range(0,numTrainingVec,batchSize):

        # Batch Data
        batchEndPoint = min(i+batchSize, numTrainingVec)
        trainBatchData = X_train[i:batchEndPoint]
        trainBatchLabel = y_train[i:batchEndPoint]

        # Dropout keep probability 0.5 during training only.
        train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel, keep_prob: 0.5})

    # Print accuracy every 5 epochs and on the last epoch.
    if epoch % 5 == 0 or epoch == numEpochs-1:
        plotx.append(epoch)
        # NOTE(review): "train" metrics are evaluated on the *last*
        # mini-batch only, not the full training set.
        train_error = cross_entropy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel, keep_prob: 1.0})
        train_acc = accuracy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel, keep_prob: 1.0})
        val_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val, keep_prob: 1.0})
        val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val, keep_prob: 1.0})
        ploty_train.append(train_error)
        ploty_val.append(val_error)
        print("epoch: %d, train acc %g, val acc %g, train error %g, val error %g"%(epoch, train_acc, val_acc, train_error, val_error))
        if val_error < bestValErr:
            bestValErr = val_error
            bestValEpoch = epoch
            # NOTE(review): these assignments rebind the same tf.Variable
            # objects rather than snapshotting their current values, so the
            # "restore" below cannot roll the weights back (a tf.train.Saver
            # or explicit value copies would be required).
            OpW1 = W1
            Opb1 = b1
            OpW2 = W2
            Opb2 = b2
            OpW3 = W3
            Opb3 = b3

endTime = time.time()
print("Elapse Time:", endTime - startTime)
print("Best validation error: %g at epoch %d"%(bestValErr, bestValEpoch))

# Restore best model for early stopping
W1 = OpW1
b1 = Opb1
W2 = OpW2
b2 = Opb2
W3 = OpW3
b3 = Opb3

print('==> Generating error plot...')
errfig = plt.figure()
trainErrPlot = errfig.add_subplot(111)
trainErrPlot.set_xlabel('Number of Epochs')
trainErrPlot.set_ylabel('Cross-Entropy Error')
trainErrPlot.set_title('Error vs Number of Epochs')
trainErrPlot.scatter(plotx, ploty_train)
valErrPlot = errfig.add_subplot(111)
valErrPlot.scatter(plotx, ploty_val)
errfig.savefig('exp8f_dropout.png')
'''
GENERATING REPRESENTATION OF NOISY FILES
'''
namelist = ['orig','comp5','comp10','str5','str10','ampSat_(-15)','ampSat_(-10)','ampSat_(-5)', \
            'ampSat_(5)','ampSat_(10)','ampSat_(15)','pitchShift_(-1)','pitchShift_(-0.5)', \
            'pitchShift_(0.5)','pitchShift_(1)','rev_dkw','rev_gal','rev_shan0','rev_shan1', \
            'rev_gen','crowd-15','crowd-10','crowd-5','crowd0','crowd5','crowd10','crowd15', \
            'crowd100','rest-15','rest-10','rest-5','rest0','rest5','rest10','rest15', \
            'rest100','AWGN-15','AWGN-10','AWGN-5','AWGN0','AWGN5','AWGN10','AWGN15', 'AWGN100']
outdir = '/scratch/ttanpras/taylorswift_noisy_processed/'
repDict = {}

# Loop over each CQT files, not shuffled
for count in range(len(namelist)):
    name = namelist[count]
    filename = outdir + name + '.mat'
    cqt = scipy.io.loadmat(filename)['Q']
    cqt = np.transpose(np.array(cqt))

    # Group into windows of 32 without overlapping
    # Discard any leftover frames
    num_windows = cqt.shape[0] // 32
    cqt = cqt[:32*num_windows]
    X = np.reshape(cqt,(num_windows, num_features))

    # Feed the windows through the model; `h` is the pre-activation of the
    # second layer (the bit representation), i.e. relu(x*W1+b1)*W2 + b2.
    rep = h.eval(feed_dict={x:X})

    # Put the output representation into a dictionary
    repDict['n'+str(count)] = rep

scipy.io.savemat('exp8f_dropout_repNon.mat',repDict)
| |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import netaddr
import functools
import new
from nose.tools import *
from ryu.lib import ofctl_v1_2
from ryu.ofproto import ofproto_v1_2, ofproto_v1_2_parser
from ryu.lib import ofctl_v1_3
from ryu.ofproto import ofproto_v1_3, ofproto_v1_3_parser
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import inet
# Module-level logger shared by all tests in this file.
LOG = logging.getLogger('test_ofctl_v1_2, v1_3')

""" Common Functions """
def _str_to_int(src):
if isinstance(src, str):
if src.startswith("0x") or src.startswith("0X"):
dst = int(src, 16)
else:
dst = int(src)
else:
dst = src
return dst
def _to_match_eth(value):
if '/' in value:
value = value.split('/')
return value[0], value[1]
else:
return value, None
def _to_match_ip(value):
    """Parse an IP string, optionally 'addr/prefix-or-netmask'.

    Returns (network_address, netmask) as strings when a '/' is
    present, otherwise (value, None) untouched.
    """
    if '/' not in value:
        return value, None
    net = netaddr.ip.IPNetwork(value)
    return str(net.network), str(net.netmask)
def _to_match_masked_int(value):
    """Parse a possibly-masked integer field.

    A 'value/mask' string yields an int pair; anything else yields
    (int_value, None). Each half may be decimal or hex.
    """
    if not (isinstance(value, str) and '/' in value):
        return _str_to_int(value), None
    fields = value.split('/')
    return _str_to_int(fields[0]), _str_to_int(fields[1])
# Mapping of OpenFlow 1.0 match-field names to their 1.2+ equivalents.
conv_of10_to_of12_dict = {
    'dl_dst': 'eth_dst',
    'dl_src': 'eth_src',
    'dl_type': 'eth_type',
    'dl_vlan': 'vlan_vid',
    'nw_src': 'ipv4_src',
    'nw_dst': 'ipv4_dst',
    'nw_proto': 'ip_proto'
}

# Reverse mapping: OpenFlow 1.2+ names back to the 1.0 names.
# Note tcp_*/udp_* both collapse onto tp_* (OF1.0 has one port pair).
conv_of12_to_of10_dict = {
    'eth_src': 'dl_src',
    'eth_dst': 'dl_dst',
    'eth_type': 'dl_type',
    'ipv4_dst': 'nw_dst',
    'ipv4_src': 'nw_src',
    'ip_proto': 'nw_proto',
    'vlan_vid': 'dl_vlan',
    'tcp_src': 'tp_src',
    'tcp_dst': 'tp_dst',
    'udp_src': 'tp_src',
    'udp_dst': 'tp_dst'
}
""" Test_ofctl """
class Test_ofctl(unittest.TestCase):
    """Shared round-trip checker for ofctl_v1_2 / ofctl_v1_3.

    Concrete test methods are attached dynamically at module level (see
    _add_tests_actions / _add_tests_match); this class only provides the
    generic verification logic.
    """

    def __init__(self, methodName):
        super(Test_ofctl, self).__init__(methodName)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _test_actions(self, act, test):
        """Round-trip one action dict: str form -> parser object -> str form."""
        act_type = act["type"]
        to_actions = test.to_actions
        actions_to_str = test.actions_to_str
        dp = ofproto_protocol.ProtocolDesc(version=test.ver)
        act_list = []
        act_list.append(act)
        # str -> action
        result = to_actions(dp, act_list)
        insts = result[0]
        if act_type in test.supported_action:
            cls = test.supported_action[act_type]
        else:
            cls = None
        # GOTO_TABLE / WRITE_METADATA / METER map to instruction objects;
        # everything else is checked via the first action of the instruction.
        if act_type == 'GOTO_TABLE':
            ok_(isinstance(insts, cls))
            eq_(insts.table_id, act["table_id"])
        elif act_type == 'WRITE_METADATA':
            ok_(isinstance(insts, cls))
            eq_(insts.metadata, act["metadata"])
            eq_(insts.metadata_mask, act["metadata_mask"])
        elif act_type == 'METER':
            ok_(isinstance(insts, cls))
            eq_(insts.meter_id, act["meter_id"])
        else:
            ok_(isinstance(insts.actions[0], cls))
            if act_type == 'OUTPUT':
                eq_(insts.actions[0].port, act["port"])
            elif act_type == 'SET_MPLS_TTL':
                eq_(insts.actions[0].mpls_ttl, act["mpls_ttl"])
            elif act_type in ['PUSH_VLAN', 'PUSH_MPLS',
                              'POP_MPLS', 'PUSH_PBB']:
                eq_(insts.actions[0].ethertype, act["ethertype"])
            elif act_type == 'SET_QUEUE':
                eq_(insts.actions[0].queue_id, act["queue_id"])
            elif act_type == 'GROUP':
                eq_(insts.actions[0].group_id, act["group_id"])
            elif act_type == 'SET_NW_TTL':
                eq_(insts.actions[0].nw_ttl, act["nw_ttl"])
        # action -> str
        action_str = actions_to_str(result)
        # Stringified actions look like 'TYPE:arg[...]'.
        action_str_list = action_str[0].split(':')
        eq_(action_str_list[0], act_type)
        if act_type == 'GOTO_TABLE':
            eq_(int(action_str_list[1]), act["table_id"])
        elif act_type == 'WRITE_METADATA':
            # 'metadata/mask', both rendered in hex.
            met = action_str_list[1].split('/')
            eq_(int(met[0], 16), act["metadata"])
            eq_(int(met[1], 16), act["metadata_mask"])
        elif act_type == 'METER':
            eq_(int(action_str_list[1]), act["meter_id"])
        else:
            if act_type == 'OUTPUT':
                eq_(int(action_str_list[1]), act["port"])
            elif act_type == 'SET_MPLS_TTL':
                eq_(int(action_str_list[1]), act["mpls_ttl"])
            elif act_type == 'PUSH_VLAN':
                eq_(int(action_str_list[1]), act["ethertype"])
            elif act_type == 'PUSH_MPLS':
                eq_(int(action_str_list[1]), act["ethertype"])
            elif act_type == 'POP_MPLS':
                eq_(int(action_str_list[1]), act["ethertype"])
            elif act_type == 'SET_QUEUE':
                eq_(int(action_str_list[1]), act["queue_id"])
            elif act_type == 'GROUP':
                eq_(int(action_str_list[1]), act["group_id"])
            elif act_type == 'SET_NW_TTL':
                eq_(int(action_str_list[1]), act["nw_ttl"])
            elif act_type == 'SET_FIELD':
                eq_(action_str_list[1].strip(' {'), act["field"])
                eq_(action_str_list[2].strip('} '), act["value"])
            elif act_type == 'PUSH_PBB':
                eq_(int(action_str_list[1]), act["ethertype"])

    def _test_to_match(self, attrs, test):
        """Round-trip one match dict: str form -> OFPMatch -> str form."""
        to_match = test.to_match
        match_to_str = test.match_to_str
        dp = ofproto_protocol.ProtocolDesc(version=test.ver)
        # NOTE(review): 'ofproto' is unused; vid_present is read directly below.
        ofproto = dp.ofproto
        vid_present = dp.ofproto.OFPVID_PRESENT
        # Expected conversions for the many accepted vlan_vid notations
        # (int, decimal string, hex string, value/mask pairs).
        expected_value = {
            "vlan_vid": {
                0: {"to_match": 0 | vid_present, "to_str": "0"},
                3: {"to_match": 3 | vid_present, "to_str": "3"},
                4095: {"to_match": 4095 | vid_present, "to_str": "4095"},
                "0": {"to_match": 0 | vid_present, "to_str": "0"},
                "3": {"to_match": 3 | vid_present, "to_str": "3"},
                "4095": {"to_match": 4095 | vid_present, "to_str": "4095"},
                "0x0000": {"to_match": 0x0000, "to_str": "0x0000"},
                "0x0003": {"to_match": 0x0003, "to_str": "0x0003"},
                "0x0fff": {"to_match": 0x0fff, "to_str": "0x0fff"},
                "0x1000": {"to_match": 0x1000, "to_str": "0"},
                "0x1003": {"to_match": 0x1003, "to_str": "3"},
                "0x1fff": {"to_match": 0x1fff, "to_str": "4095"},
                "4096/4096": {"to_match": (4096, 4096),
                              "to_str": "0x1000/0x1000"},
                "4096/4097": {"to_match": (4096, 4097),
                              "to_str": "0x1000/0x1001"},
                "2744/2748": {"to_match": (2744, 2748),
                              "to_str": "0x0ab8/0x0abc"},
                "2748/2748": {"to_match": (2748, 2748),
                              "to_str": "0x0abc/0x0abc"},
                "2748/2749": {"to_match": (2748, 2749),
                              "to_str": "0x0abc/0x0abd"},
                "0x1000/0x1000": {"to_match": (0x1000, 0x1000),
                                  "to_str": "0x1000/0x1000"},
                "0x1000/0x1001": {"to_match": (0x1000, 0x1001),
                                  "to_str": "0x1000/0x1001"},
                "0x0ab8/0x0abc": {"to_match": (0x0ab8, 0x0abc),
                                  "to_str": "0x0ab8/0x0abc"},
                "0x0abc/0x0abc": {"to_match": (0x0abc, 0x0abc),
                                  "to_str": "0x0abc/0x0abc"},
                "0x0abc/0x0abd": {"to_match": (0x0abc, 0x0abd),
                                  "to_str": "0x0abc/0x0abd"}
            }
        }
        # str -> match
        match = to_match(dp, attrs)

        def equal_match(key, value, match):
            # Compare one requested attribute with the built match field.
            field_value = match[key]
            if key in ['eth_src', 'eth_dst', 'arp_sha', 'arp_tha']:
                # MAC address
                eth, mask = _to_match_eth(value)
                if mask is not None:
                    # with mask: only compare octet chars where mask is 'f'
                    for i in range(0, len(mask)):
                        if mask[i] == 'f':
                            eq_(eth[i], field_value[0][i])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(eth, field_value)
                return
            elif key in ['ipv4_src', 'ipv4_dst', 'arp_spa', 'arp_tpa']:
                # IPv4 address
                ipv4, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    eq_(ipv4, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv4, field_value)
                return
            elif key in ['ipv6_src', 'ipv6_dst']:
                # IPv6 address
                ipv6, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    eq_(ipv6, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv6, field_value)
                return
            elif key == 'vlan_vid':
                eq_(expected_value['vlan_vid'][value]['to_match'], field_value)
                return
            elif key == 'metadata' or key == 'ipv6_exthdr':
                # Metadata or IPv6 Extension Header pseudo-field
                value, mask = _to_match_masked_int(value)
                if mask is not None:
                    # with mask
                    value &= mask
                    eq_(value, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(value, field_value)
                return
            else:
                eq_(value, field_value)
                return
        for key, value in attrs.items():
            if key in conv_of10_to_of12_dict:
                # For old field name
                key_new = conv_of10_to_of12_dict[key]
            elif key == 'tp_src' or key == 'tp_dst':
                # TCP/UDP port: tp_* resolves via the IP protocol number.
                conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
                                           'tp_dst': 'tcp_dst'},
                        inet.IPPROTO_UDP: {'tp_src': 'udp_src',
                                           'tp_dst': 'udp_dst'}}
                ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
                key_new = conv[ip_proto][key]
            else:
                key_new = key
            equal_match(key_new, value, match)
        # match -> str
        match_str = match_to_str(match)

        def equal_str(key, value, match_str):
            # Compare one requested attribute with its stringified form.
            field_value = match_str[key]
            if key in ['dl_src', 'dl_dst', 'arp_sha', 'arp_tha']:
                # MAC address
                eth, mask = _to_match_eth(value)
                if mask is not None:
                    # with mask: only compare octet chars where mask is 'f'
                    field_value = field_value.split('/')
                    for i in range(0, len(mask)):
                        if mask[i] == 'f':
                            eq_(eth[i], field_value[0][i])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(eth, field_value)
                return
            elif key in['nw_src', 'nw_dst', 'arp_spa', 'arp_tpa']:
                # IPv4 address
                ipv4, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    field_value = field_value.split('/')
                    eq_(ipv4, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv4, field_value)
                return
            elif key in ['ipv6_src', 'ipv6_dst']:
                # IPv6 address
                ipv6, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    field_value = field_value.split('/')
                    eq_(ipv6, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv6, field_value)
                return
            elif key == 'dl_vlan':
                eq_(expected_value['vlan_vid'][value]['to_str'], field_value)
                return
            elif key == 'metadata' or key == 'ipv6_exthdr':
                # Metadata or IPv6 Extension Header pseudo-field
                value, mask = _to_match_masked_int(value)
                if mask is not None:
                    # with mask
                    field_value = field_value.split('/')
                    value &= mask
                    eq_(str(value), field_value[0])
                    eq_(str(mask), field_value[1])
                else:
                    # without mask
                    eq_(str(value), field_value)
                return
            else:
                eq_(value, field_value)
                return
        for key, value in attrs.items():
            if key in conv_of12_to_of10_dict:
                key_old = conv_of12_to_of10_dict[key]
            else:
                key_old = key
            equal_str(key_old, value, match_str)
""" Test_data for of_v1_2 """
class test_data_v1_2():
    """Fixture data for OpenFlow 1.2.

    Holds the action/match dicts to exercise and, after the set_* calls,
    the parser classes each string form should map to.
    """

    def __init__(self):
        # Filled in by set_action_v1_2 / set_match_v1_2.
        self.supported_action = {}
        self.supported_match = {}
        # Action dicts in the string form accepted by ofctl to_actions().
        self.act_list = [
            {'type': 'OUTPUT', 'port': 3},
            {'type': 'COPY_TTL_OUT'},
            {'type': 'COPY_TTL_IN'},
            {'type': 'SET_MPLS_TTL', 'mpls_ttl': 64},
            {'type': 'DEC_MPLS_TTL'},
            {'type': 'PUSH_VLAN', 'ethertype': 0x0800},
            {'type': 'POP_VLAN'},
            {'type': 'PUSH_MPLS', 'ethertype': 0x0800},
            {'type': 'POP_MPLS', 'ethertype': 0x0800},
            {'type': 'SET_QUEUE', 'queue_id': 7},
            {'type': 'GROUP', 'group_id': 5},
            {'type': 'SET_NW_TTL', 'nw_ttl': 64},
            {'type': 'DEC_NW_TTL'},
            {'type': 'GOTO_TABLE', 'table_id': 8},
            {'type': 'WRITE_METADATA', 'metadata': 8,
             'metadata_mask': (1 << 64) - 1},
        ]
        # Match attribute dicts accepted by ofctl to_match(); both the
        # OF1.0-style names (dl_*, nw_*, tp_*) and OF1.2 names appear.
        self.attr_list = [
            {'in_port': 7},
            {'in_phy_port': 5, 'in_port': 3},
            {'metadata': '0x1212121212121212'},
            {'metadata': '0x19af28be37fa91b/0x1010101010101010'},
            {'dl_src': "aa:bb:cc:11:22:33"},
            {'dl_src': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'dl_dst': "aa:bb:cc:11:22:33"},
            {'dl_dst': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'dl_type': 123},
            {'eth_src': "aa:bb:cc:11:22:33"},
            {'eth_src': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'eth_dst': "aa:bb:cc:11:22:33"},
            {'eth_dst': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'eth_type': 0x800},
            {'dl_vlan': 0},
            {'dl_vlan': 3},
            {'dl_vlan': 4095},
            {'dl_vlan': "0"},
            {'dl_vlan': "3"},
            {'dl_vlan': "4095"},
            {'dl_vlan': "0x0000"},
            {'dl_vlan': "0x0003"},
            {'dl_vlan': "0x0fff"},
            {'dl_vlan': "0x1000"},
            {'dl_vlan': "0x1003"},
            {'dl_vlan': "0x1fff"},
            {'dl_vlan': "4096/4096"},
            {'dl_vlan': "4096/4097"},
            {'dl_vlan': "2744/2748"},
            {'dl_vlan': "2748/2748"},
            {'dl_vlan': "2748/2749"},
            {'dl_vlan': "0x1000/0x1000"},
            {'dl_vlan': "0x1000/0x1001"},
            {'dl_vlan': "0x0ab8/0x0abc"},
            {'dl_vlan': "0x0abc/0x0abc"},
            {'dl_vlan': "0x0abc/0x0abd"},
            {'vlan_pcp': 3, 'vlan_vid': 3},
            {'ip_dscp': 3, 'eth_type': 0x0800},
            {'ip_ecn': 4, 'eth_type': 0x86dd},
            {'nw_src': "192.168.0.1", 'eth_type': 0x0800},
            {'nw_src': "192.168.0.1/24", 'eth_type': 0x0800},
            {'nw_src': "192.168.10.10/255.255.0.0", 'eth_type': 0x0800},
            {'nw_dst': "192.168.0.1", 'eth_type': 0x0800},
            {'nw_dst': "192.168.0.1/24", 'eth_type': 0x0800},
            {'nw_dst': "192.168.10.10/255.255.255.0"},
            {'nw_proto': 5, 'eth_type': 0x0800},
            {'ip_proto': 5, 'eth_type': 0x86dd},
            {'ipv4_src': "192.168.0.1", 'eth_type': 0x0800},
            {'ipv4_src': "192.168.0.1/24", 'eth_type': 0x0800},
            {'ipv4_src': "192.168.10.10/255.255.0.0", 'eth_type': 0x0800},
            {'ipv4_dst': "192.168.0.1", 'eth_type': 0x0800},
            {'ipv4_dst': "192.168.0.1/24", 'eth_type': 0x0800},
            {'ipv4_dst': "192.168.10.10/255.255.255.0", 'eth_type': 0x0800},
            {'tp_src': 1, 'ip_proto': 6},
            {'tp_dst': 2, 'ip_proto': 6},
            {'tp_src': 3, 'ip_proto': 17},
            {'tp_dst': 4, 'ip_proto': 17},
            {'vlan_vid': 0},
            {'vlan_vid': 3},
            {'vlan_vid': 4095},
            {'vlan_vid': "0"},
            {'vlan_vid': "3"},
            {'vlan_vid': "4095"},
            {'vlan_vid': "0x0000"},
            {'vlan_vid': "0x0003"},
            {'vlan_vid': "0x0fff"},
            {'vlan_vid': "0x1000"},
            {'vlan_vid': "0x1003"},
            {'vlan_vid': "0x1fff"},
            {'vlan_vid': "4096/4096"},
            {'vlan_vid': "4096/4097"},
            {'vlan_vid': "2744/2748"},
            {'vlan_vid': "2748/2748"},
            {'vlan_vid': "2748/2749"},
            {'vlan_vid': "0x1000/0x1000"},
            {'vlan_vid': "0x1000/0x1001"},
            {'vlan_vid': "0x0ab8/0x0abc"},
            {'vlan_vid': "0x0abc/0x0abc"},
            {'vlan_vid': "0x0abc/0x0abd"},
            {'tcp_src': 3, 'ip_proto': 6},
            {'tcp_dst': 5, 'ip_proto': 6},
            {'udp_src': 2, 'ip_proto': 17},
            {'udp_dst': 6, 'ip_proto': 17},
            {'sctp_src': 99, 'ip_proto': 132},
            {'sctp_dst': 99, 'ip_proto': 132},
            {'icmpv4_type': 5, 'ip_proto': 1},
            {'icmpv4_code': 6, 'ip_proto': 1},
            {'arp_op': 3, 'eth_type': 0x0806},
            {'arp_spa': "192.168.0.11", 'eth_type': 0x0806},
            {'arp_spa': "192.168.0.22/24", 'eth_type': 0x0806},
            {'arp_tpa': "192.168.0.33", 'eth_type': 0x0806},
            {'arp_tpa': "192.168.0.44/24", 'eth_type': 0x0806},
            {'arp_sha': "aa:bb:cc:11:22:33", 'eth_type': 0x0806},
            {'arp_sha': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff",
             'eth_type': 0x0806},
            {'arp_tha': "aa:bb:cc:11:22:33", 'eth_type': 0x0806},
            {'arp_tha': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff",
             'eth_type': 0x0806},
            {'ipv6_src': '2001::aaaa:bbbb:cccc:1111', 'eth_type': 0x86dd},
            {'ipv6_src': '2001::aaaa:bbbb:cccc:1111/64', 'eth_type': 0x86dd},
            {'ipv6_dst': '2001::ffff:cccc:bbbb:1111', 'eth_type': 0x86dd},
            {'ipv6_dst': '2001::ffff:cccc:bbbb:1111/64', 'eth_type': 0x86dd},
            {'ipv6_flabel': 2, 'eth_type': 0x86dd},
            {'icmpv6_type': 3, 'ip_proto': 58},
            {'icmpv6_code': 4, 'ip_proto': 58},
            {'ipv6_nd_target': '2001::ffff:cccc:bbbb:1111',
             'icmpv6_type': 135, 'ip_proto': 58},
            {'ipv6_nd_sll': "aa:bb:cc:11:22:33",
             'icmpv6_type': 135, 'ip_proto': 58},
            {'ipv6_nd_tll': "aa:bb:cc:11:22:33",
             'icmpv6_type': 136, 'ip_proto': 58},
            {'mpls_label': 3, 'eth_type': 0x8848},
            {'mpls_tc': 2, 'eth_type': 0x8848}
        ]

    def set_ver(self, ver):
        # OpenFlow wire version used to build the ProtocolDesc.
        self.ver = ver

    def set_attr(self, ofctl):
        # Bind the conversion entry points of the ofctl module under test.
        self.to_match = getattr(ofctl, "to_match")
        self.match_to_str = getattr(ofctl, "match_to_str")
        self.to_actions = getattr(ofctl, "to_actions")
        self.actions_to_str = getattr(ofctl, "actions_to_str")

    def set_action_v1_2(self, parser):
        # Map action-type strings to the OF1.2 parser classes.
        self.supported_action.update(
            {
                'OUTPUT': getattr(parser, "OFPActionOutput"),
                'COPY_TTL_OUT': getattr(parser, "OFPActionCopyTtlOut"),
                'COPY_TTL_IN': getattr(parser, "OFPActionCopyTtlIn"),
                'SET_MPLS_TTL': getattr(parser, "OFPActionSetMplsTtl"),
                'DEC_MPLS_TTL': getattr(parser, "OFPActionDecMplsTtl"),
                'PUSH_VLAN': getattr(parser, "OFPActionPushVlan"),
                'POP_VLAN': getattr(parser, "OFPActionPopVlan"),
                'PUSH_MPLS': getattr(parser, "OFPActionPushMpls"),
                'POP_MPLS': getattr(parser, "OFPActionPopMpls"),
                'SET_QUEUE': getattr(parser, "OFPActionSetQueue"),
                'GROUP': getattr(parser, "OFPActionGroup"),
                'SET_NW_TTL': getattr(parser, "OFPActionSetNwTtl"),
                'DEC_NW_TTL': getattr(parser, "OFPActionDecNwTtl"),
                'SET_FIELD': getattr(parser, "OFPActionSetField"),
                'GOTO_TABLE': getattr(parser, "OFPInstructionGotoTable"),
                'WRITE_METADATA': getattr(parser,
                                          "OFPInstructionWriteMetadata"),
            })

    def set_match_v1_2(self, parser):
        # Map match-field names to the OF1.2 parser classes; tp_* keys
        # are sub-dicts keyed by IP protocol number (6=TCP, 17=UDP).
        self.supported_match.update(
            {
                'in_port': getattr(parser, "MTInPort"),
                'in_phy_port': getattr(parser, "MTInPhyPort"),
                'metadata': getattr(parser, "MTMetadata"),
                'eth_dst': getattr(parser, "MTEthDst"),
                'dl_dst': getattr(parser, "MTEthDst"),
                'eth_src': getattr(parser, "MTEthSrc"),
                'dl_src': getattr(parser, "MTEthSrc"),
                'dl_type': getattr(parser, "MTEthType"),
                'eth_type': getattr(parser, "MTEthType"),
                'dl_vlan': getattr(parser, "MTVlanVid"),
                'vlan_vid': getattr(parser, "MTVlanVid"),
                'vlan_pcp': getattr(parser, "MTVlanPcp"),
                'ip_dscp': getattr(parser, "MTIPDscp"),
                'ip_ecn': getattr(parser, "MTIPECN"),
                'nw_proto': getattr(parser, "MTIPProto"),
                'ip_proto': getattr(parser, "MTIPProto"),
                'nw_src': getattr(parser, "MTIPV4Src"),
                'nw_dst': getattr(parser, "MTIPV4Dst"),
                'ipv4_src': getattr(parser, "MTIPV4Src"),
                'ipv4_dst': getattr(parser, "MTIPV4Dst"),
                'tp_src': {6: getattr(parser, "MTTCPSrc"),
                           17: getattr(parser, "MTUDPSrc")},
                'tp_dst': {6: getattr(parser, "MTTCPDst"),
                           17: getattr(parser, "MTUDPDst")},
                'tcp_src': getattr(parser, "MTTCPSrc"),
                'tcp_dst': getattr(parser, "MTTCPDst"),
                'udp_src': getattr(parser, "MTUDPSrc"),
                'udp_dst': getattr(parser, "MTUDPDst"),
                'sctp_src': getattr(parser, "MTSCTPSrc"),
                'sctp_dst': getattr(parser, "MTSCTPDst"),
                'icmpv4_type': getattr(parser, "MTICMPV4Type"),
                'icmpv4_code': getattr(parser, "MTICMPV4Code"),
                'arp_op': getattr(parser, "MTArpOp"),
                'arp_spa': getattr(parser, "MTArpSpa"),
                'arp_tpa': getattr(parser, "MTArpTpa"),
                'arp_sha': getattr(parser, "MTArpSha"),
                'arp_tha': getattr(parser, "MTArpTha"),
                'ipv6_src': getattr(parser, "MTIPv6Src"),
                'ipv6_dst': getattr(parser, "MTIPv6Dst"),
                'ipv6_flabel': getattr(parser, "MTIPv6Flabel"),
                'icmpv6_type': getattr(parser, "MTICMPV6Type"),
                'icmpv6_code': getattr(parser, "MTICMPV6Code"),
                'ipv6_nd_target': getattr(parser, "MTIPv6NdTarget"),
                'ipv6_nd_sll': getattr(parser, "MTIPv6NdSll"),
                'ipv6_nd_tll': getattr(parser, "MTIPv6NdTll"),
                'mpls_label': getattr(parser, "MTMplsLabel"),
                'mpls_tc': getattr(parser, "MTMplsTc"),
            })
""" Test_data for of_v1_3 """
class test_data_v1_3(test_data_v1_2):
    """Fixture data for OpenFlow 1.3.

    Extends the OF1.2 fixture with the actions and match fields that
    were added in OF1.3.
    """

    def __init__(self):
        test_data_v1_2.__init__(self)
        # Actions new in OF1.3.
        self.act_list.extend(
            [
                {'type': 'PUSH_PBB', 'ethertype': 0x0800},
                {'type': 'POP_PBB'},
                {'type': 'METER', 'meter_id': 3},
            ]
        )
        # Match fields new in OF1.3.
        self.attr_list.extend(
            [
                {'mpls_bos': 3, 'eth_type': 0x8848},
                {'pbb_isid': 5, 'eth_type': 0x88E7},
                {'tunnel_id': 7},
                {'ipv6_exthdr': 3, 'eth_type': 0x86dd},
                {'ipv6_exthdr': "0x40", 'eth_type': 0x86dd},
                {'ipv6_exthdr': "0x40/0x1F0", 'eth_type': 0x86dd},
            ]
        )

    def set_action_v1_3(self, parser):
        # OF1.2 actions plus the OF1.3-only ones.
        self.set_action_v1_2(parser)
        self.supported_action.update(
            {
                'PUSH_PBB': getattr(parser, "OFPActionPushPbb"),
                'POP_PBB': getattr(parser, "OFPActionPopPbb"),
                'METER': getattr(parser, "OFPInstructionMeter"),
            })

    def set_match_v1_3(self, parser):
        # OF1.2 match fields plus the OF1.3-only ones.
        self.set_match_v1_2(parser)
        self.supported_match.update(
            {
                'mpls_bos': getattr(parser, "MTMplsBos"),
                'pbb_isid': getattr(parser, "MTPbbIsid"),
                'tunnel_id': getattr(parser, "MTTunnelId"),
                'ipv6_exthdr': getattr(parser, "MTIPv6ExtHdr"),
            })
""" Test_data for of_v1_4 """
# class test_data_v1_4(test_data_v1_3):
# def __init__(self):
# test_data_v1_3.__init__(self)
# def set_action_v1_4(self, parser):
# self.set_action_v1_3(parser)
# def set_match_v1_4(self, parser):
# self.set_match_v1_3(parser)
def _add_tests_actions(cls):
    """Attach one generated test method per entry in cls.act_list.

    Uses the Python 2-only `new.instancemethod` to bind the generated
    callable as an unbound method of Test_ofctl.
    """
    for act in cls.act_list:
        method_name = 'test_' + str(cls.ver) + '_' + act["type"] + '_action'

        def _run(self, name, act, cls):
            # Body of the generated test: delegate to _test_actions.
            print ('processing %s ...' % name)
            cls_ = Test_ofctl(name)
            cls_._test_actions(act, cls)
        print ('adding %s ...' % method_name)
        # partial binds the loop variables now; a plain closure would
        # late-bind them and every test would see the last iteration.
        func = functools.partial(_run, name=method_name, act=act, cls=cls)
        func.func_name = method_name
        func.__name__ = method_name
        im = new.instancemethod(func, None, Test_ofctl)
        setattr(Test_ofctl, method_name, im)
def _add_tests_match(cls):
    """Attach one generated test method per attribute in cls.attr_list.

    The method name embeds the value and its type so that e.g. 3 and "3"
    produce distinct tests. Uses the Python 2-only `new.instancemethod`.
    """
    for attr in cls.attr_list:
        for key, value in attr.items():
            method_name = 'test_' + \
                str(cls.ver) + '_' + key + '_' + str(
                    value) + str(type(value)) + '_match'

            def _run(self, name, attr, cls):
                # Body of the generated test: delegate to _test_to_match.
                print ('processing %s ...' % name)
                cls_ = Test_ofctl(name)
                cls_._test_to_match(attr, cls)
            print ('adding %s ...' % method_name)
            # partial binds the loop variables now (avoids late binding).
            func = functools.partial(
                _run, name=method_name, attr=attr, cls=cls)
            func.func_name = method_name
            func.__name__ = method_name
            im = new.instancemethod(func, None, Test_ofctl)
            setattr(Test_ofctl, method_name, im)
""" Test case """
# for of12
cls = test_data_v1_2()
cls.set_action_v1_2(ofproto_v1_2_parser)
cls.set_match_v1_2(ofproto_v1_2_parser)
cls.set_ver(ofproto_v1_2.OFP_VERSION)
cls.set_attr(ofctl_v1_2)
_add_tests_actions(cls)
_add_tests_match(cls)
# for of13
cls = test_data_v1_3()
cls.set_action_v1_3(ofproto_v1_3_parser)
cls.set_match_v1_3(ofproto_v1_3_parser)
cls.set_ver(ofproto_v1_3.OFP_VERSION)
cls.set_attr(ofctl_v1_3)
_add_tests_actions(cls)
_add_tests_match(cls)
# for of14
# cls = test_data_v1_4()
# cls.set_action_v1_4(ofproto_v1_4_parser)
# cls.set_match_v1_4(ofproto_v1_4_parser)
# cls.set_ver(ofproto_v1_4.OFP_VERSION)
# cls.set_attr(ofctl_v1_4)
# _add_tests_actions(cls)
# _add_tests_match(cls)
| |
import os
from contextlib import contextmanager
from . import _compat, constants, exc
def make(config):
    """Create the cache backend selected by the supplied config.

    Args:
        config:
            Configuration object providing ``cache_store`` and, for the
            redis backend, the ``redis_*`` settings.

    Returns:
        Any[RedisCache, NoopCache]:
            A :class:`Cache` sub-class, chosen from the
            `RESIZE_CACHE_STORE` value.

    Raises:
        RuntimeError: If another `RESIZE_CACHE_STORE` value was set
    """
    store = config.cache_store
    if store == 'redis':
        return RedisCache(
            host=config.redis_host,
            port=config.redis_port,
            db=config.redis_db,
            password=config.redis_password,
            key=config.redis_key,
        )
    if store == 'noop':
        return NoopCache()
    raise RuntimeError(
        'Non-supported RESIZE_CACHE_STORE value: "{}"'
        .format(store)
    )
class Cache:
    """Cache base class.

    Declares the interface every backend implements; each method here
    raises NotImplementedError.
    """

    def exists(self, unique_key):
        # Return whether unique_key is present in the cache.
        raise NotImplementedError

    def add(self, unique_key):
        # Add unique_key; return whether it was newly added.
        raise NotImplementedError

    def remove(self, unique_key):
        # Remove unique_key; return whether anything was removed.
        raise NotImplementedError

    def clear(self):
        # Drop all keys; return whether any were removed.
        raise NotImplementedError

    def all(self):
        # Return all cached keys as a list.
        raise NotImplementedError

    def transaction(self, unique_key, ttl=600):
        # Context manager guarding exclusive handling of unique_key.
        raise NotImplementedError
class NoopCache(Cache):
    """Cache backend that never stores anything.

    Lets calling code follow a single code path whether caching is
    enabled or not: every query reports a miss and every mutation
    reports that nothing happened.
    """

    def exists(self, unique_key):
        """Report whether *unique_key* is cached.

        Args:
            unique_key (str): Unique key to check for

        Returns:
            bool: always ``False`` -- nothing is ever stored
        """
        return False

    def add(self, unique_key):
        """Pretend to add *unique_key* to the cache.

        Args:
            unique_key (str): Add this key to the cache

        Returns:
            bool: always ``False`` -- the key is never actually added
        """
        return False

    def remove(self, unique_key):
        """Pretend to remove *unique_key* from the cache.

        Args:
            unique_key (str): Remove this key from the cache

        Returns:
            bool: always ``False`` -- there is never anything to remove
        """
        return False

    def clear(self):
        """Pretend to clear the cache.

        Returns:
            bool: always ``False`` -- there is never anything to clear
        """
        return False

    def all(self):
        """List the cached keys.

        Returns:
            List[str]: always the empty list
        """
        return []

    @contextmanager
    def transaction(self, unique_key, ttl=600):
        """No-op transaction guard. Always yields ``True``."""
        yield True
class RedisCache(Cache):
    """A Redis-based cache that works with a single set-type key.

    Basically just useful for checking whether an expected value in the set
    already exists (which is exactly what's needed in Flask-Resize).
    """

    def __init__(
        self,
        host='localhost',
        port=6379,
        db=0,
        password=None,
        key=constants.DEFAULT_REDIS_KEY
    ):
        """
        Args:
            host: Redis hostname, or an already-constructed client object
                to use as-is (anything that is not a string).
            port (int): Redis server port.
            db (int): Redis database number.
            password: Optional password for the Redis server.
            key (str): Name of the Redis set all cache entries live in.

        Raises:
            exc.RedisImportError: If the ``redis`` package is missing.
        """
        if _compat.redis is None:
            raise exc.RedisImportError(
                "Redis must be installed for Redis support. "
                "Package found @ https://pypi.python.org/pypi/redis."
            )
        self.key = key
        self._host = host
        self._port = port
        self._db = db
        if isinstance(host, _compat.string_types):
            self.redis = _compat.redis.StrictRedis(
                host=host,
                port=port,
                db=db,
                password=password,
            )
        else:
            # A non-string host is treated as a pre-built client
            # (useful for injecting fakes in tests).
            self.redis = host

    def exists(self, unique_key):
        """
        Check if key exists in cache

        Args:
            unique_key (str): Unique key to check for

        Returns:
            bool: Whether key exist in cache or not
        """
        return self.redis.sismember(self.key, unique_key)

    def add(self, unique_key):
        """
        Add key to cache

        Args:
            unique_key (str): Add this key to the cache

        Returns:
            bool: Whether key was added or not
        """
        return bool(self.redis.sadd(self.key, unique_key))

    def remove(self, unique_key):
        """
        Remove key from cache

        Args:
            unique_key (str): Remove this key from the cache

        Returns:
            bool: Whether key was removed or not
        """
        return bool(self.redis.srem(self.key, unique_key))

    def clear(self):
        """
        Remove all keys from cache

        Returns:
            bool: Whether any keys were removed or not
        """
        return bool(self.redis.delete(self.key))

    def all(self):
        """
        List all keys in cache

        Returns:
            List[str]: All the keys in the set, as a list
        """
        return [v.decode() for v in self.redis.smembers(self.key)]

    @contextmanager
    def transaction(
        self,
        unique_key,
        ttl=600
    ):
        """
        Context-manager to use when it's important that no one else
        handles `unique_key` at the same time (for example when
        saving data to a storage backend).

        Yields ``True`` when the lock was acquired, ``False`` otherwise.

        Args:
            unique_key (str):
                The unique key to ensure atomicity for
            ttl (int):
                Time before the transaction is deemed irrelevant and discarded
                from cache. Is only relevant if the host forcefully restarts.
        """
        tkey = '-transaction-'.join([self.key, unique_key])
        # SET with nx=True succeeds only for the first concurrent caller.
        if self.redis.set(tkey, str(os.getpid()), nx=True):
            self.redis.expire(tkey, ttl)
            try:
                yield True
            finally:
                # Only the lock holder releases the lock.
                self.redis.delete(tkey)
        else:
            # BUG FIX: the original yielded False and then fell through
            # into `try: yield True`, so the generator yielded twice --
            # @contextmanager raises RuntimeError on exit -- and its
            # `finally` deleted a lock held by another process. The
            # non-acquiring path must yield exactly once and leave the
            # lock untouched.
            yield False
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class WorkflowRunsOperations(object):
    """WorkflowRunsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version. Constant value: "2016-06-01".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Generated client: the API version is pinned, not configurable.
        self.api_version = "2016-06-01"

        self.config = config

    def list(
            self, resource_group_name, workflow_name, top=None, filter=None, custom_headers=None, raw=False, **operation_config):
        """Gets a list of workflow runs.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param workflow_name: The workflow name.
        :type workflow_name: str
        :param top: The number of items to be included in the result.
        :type top: int
        :param filter: The filter to apply on the operation.
        :type filter: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`WorkflowRunPaged
         <azure.mgmt.logic.models.WorkflowRunPaged>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: build the initial URL on the first call,
            # follow the service-supplied next_link afterwards.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.WorkflowRunPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.WorkflowRunPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def get(
            self, resource_group_name, workflow_name, run_name, custom_headers=None, raw=False, **operation_config):
        """Gets a workflow run.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param workflow_name: The workflow name.
        :type workflow_name: str
        :param run_name: The workflow run name.
        :type run_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`WorkflowRun <azure.mgmt.logic.models.WorkflowRun>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
            'runName': self._serialize.url("run_name", run_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('WorkflowRun', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def cancel(
            self, resource_group_name, workflow_name, run_name, custom_headers=None, raw=False, **operation_config):
        """Cancels a workflow run.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param workflow_name: The workflow name.
        :type workflow_name: str
        :param run_name: The workflow run name.
        :type run_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}/cancel'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
            'runName': self._serialize.url("run_name", run_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request; cancel is a POST with no body.
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
| |
# coding: utf8
from __future__ import unicode_literals, print_function
import ujson
import regex as re
from pathlib import Path
import sys
import textwrap
from .compat import basestring_, unicode_, input_
# Registry mapping a language name to its Language class; populated via
# set_lang_class() and queried via get_lang_class().
LANGUAGES = {}
# Default data directory, bundled next to this module ('data/' sibling dir).
_data_path = Path(__file__).parent / 'data'
def set_lang_class(name, cls):
    """Register *cls* under *name* in the module-wide language registry."""
    # Mutating the dict does not require rebinding, so no `global` is needed.
    LANGUAGES[name] = cls
def get_lang_class(name):
    """Return the language class registered for *name*.

    Falls back to the leading alphanumeric prefix of *name* (e.g. 'en' for
    'en_core_web_sm'); raises RuntimeError when no class is registered.
    """
    try:
        return LANGUAGES[name]
    except KeyError:
        pass
    lang = re.split('[^a-zA-Z0-9]', name, 1)[0]
    if lang not in LANGUAGES:
        raise RuntimeError('Language not supported: %s' % name)
    return LANGUAGES[lang]
def get_data_path(require_exists=True):
    """Return the module data directory.

    When *require_exists* is True, return None if the directory is missing.
    """
    if require_exists and not _data_path.exists():
        return None
    return _data_path
def set_data_path(path):
    """Point the module-wide data directory at *path* (string or Path)."""
    global _data_path
    _data_path = ensure_path(path)
def ensure_path(path):
    """Coerce a string to a Path; pass any other value through unchanged."""
    return Path(path) if isinstance(path, basestring_) else path
def read_regex(path):
    """Compile a regex matching any non-blank line of *path*, escaped and
    anchored at the start of the string."""
    path = ensure_path(path)
    with path.open() as file_:
        entries = file_.read().split('\n')
    pieces = ['^' + re.escape(piece) for piece in entries if piece.strip()]
    return re.compile('|'.join(pieces))
def compile_prefix_regex(entries):
    """Compile a start-anchored alternation regex from *entries*.

    A literal '(' entry signals the deprecated plain-text data format, in
    which case every entry is escaped before being anchored.
    """
    if '(' in entries:
        # Handle deprecated data
        pieces = ['^' + re.escape(piece) for piece in entries if piece.strip()]
    else:
        pieces = ['^' + piece for piece in entries if piece.strip()]
    return re.compile('|'.join(pieces))
def compile_suffix_regex(entries):
    """Compile an end-anchored alternation regex from the non-blank entries."""
    pieces = [piece + '$' for piece in entries if piece.strip()]
    return re.compile('|'.join(pieces))
def compile_infix_regex(entries):
    """Compile an unanchored alternation regex from the non-blank entries."""
    return re.compile('|'.join(piece for piece in entries if piece.strip()))
def normalize_slice(length, start, stop, step=None):
    """Clamp a (start, stop) slice to [0, length], resolving negative indices.

    Returns the normalized (start, stop) pair. Raises ValueError for any step
    other than None or 1, since stepped slices are not supported.
    """
    if not (step is None or step == 1):
        # Fix: the two adjacent string literals previously concatenated to
        # "...objects.Try:" - a space was missing between the sentences.
        raise ValueError("Stepped slices not supported in Span objects. "
                         "Try: list(tokens)[start:stop:step] instead.")
    if start is None:
        start = 0
    elif start < 0:
        start += length
    start = min(length, max(0, start))
    if stop is None:
        stop = length
    elif stop < 0:
        stop += length
    stop = min(length, max(start, stop))
    # Internal invariant: the pair is now a valid, ordered, in-bounds slice.
    assert 0 <= start <= stop <= length
    return start, stop
def check_renamed_kwargs(renamed, kwargs):
    """Raise TypeError if *kwargs* contains any key listed in *renamed*."""
    for old_name, new_name in renamed.items():
        if old_name in kwargs:
            msg = "Keyword argument %s now renamed to %s" % (old_name, new_name)
            raise TypeError(msg)
def read_json(location):
    """Load and return the JSON content of *location* (a Path)."""
    with location.open('r', encoding='utf8') as file_:
        return ujson.load(file_)
def parse_package_meta(package_path, package, require=True):
    """
    Check if a meta.json exists in a package and return its contents as a
    dictionary. If require is set to True, raise an error if no meta.json found.
    """
    # TODO: Allow passing in full model path and only require one argument
    # instead of path and package name. This lets us avoid passing in an awkward
    # empty string in spacy.load() if user supplies full model path.
    location = package_path / package / 'meta.json'
    if not location.is_file():
        if require:
            raise IOError("Could not read meta.json from %s" % location)
        return None
    return read_json(location)
def get_raw_input(description, default=False):
    """
    Get user input via raw_input / input and return input value. Takes a
    description for the prompt, and an optional default value that's displayed
    with the prompt.
    """
    suffix = ' (default: {d})'.format(d=default) if default else ''
    return input_(' {d}{a}: '.format(d=description, a=suffix))
def print_table(data, **kwargs):
    """
    Print data in table format. Can either take a list of tuples or a
    dictionary, which will be converted to a list of tuples.

    An optional 'title' kwarg is printed, highlighted, above the table.
    Empty input prints nothing (previously raised IndexError on data[0]).
    """
    # isinstance also accepts dict subclasses (e.g. OrderedDict), which
    # `type(data) == dict` rejected.
    if isinstance(data, dict):
        data = list(data.items())
    if not data:
        return  # nothing to render; avoids IndexError on data[0] below
    tpl_msg = '\n{msg}\n'
    tpl_title = '\n \033[93m{msg}\033[0m'
    # One left-aligned, 15-wide column per field of a row.
    tpl_row = " {:<15}" * len(data[0])
    table = '\n'.join([tpl_row.format(label, value) for label, value in data])
    if kwargs.get('title'):
        print(tpl_title.format(msg=kwargs['title']))
    print(tpl_msg.format(msg=table))
def print_markdown(data, **kwargs):
    """
    Print listed data in GitHub-flavoured Markdown format so it can be
    copy-pasted into issues. Can either take a list of tuples or a dictionary,
    which will be converted to a list of tuples.
    """
    def excl_value(value):
        # don't print value if it contains absolute path of directory (i.e.
        # personal info). Other conditions can be included here if necessary.
        if not isinstance(value, basestring_):
            # Non-string values can't contain a path; previously this raised
            # TypeError on `in` for e.g. ints.
            return False
        # Explicit False (the old implicit `return None` relied on falsiness).
        return unicode_(Path(__file__).parent) in value
    # isinstance also accepts dict subclasses, unlike `type(data) == dict`.
    if isinstance(data, dict):
        data = list(data.items())
    tpl_msg = "\n{msg}\n"
    tpl_title = "\n## {msg}"
    tpl_row = "* **{l}:** {v}"
    markdown = '\n'.join([tpl_row.format(l=l, v=v) for l, v in data if not excl_value(v)])
    if kwargs.get('title'):
        print(tpl_title.format(msg=kwargs['title']))
    print(tpl_msg.format(msg=markdown))
def print_msg(*text, **kwargs):
    """
    Print formatted message. Each positional argument is rendered as newline-
    separated paragraph. If kwarg 'title' exist, title is printed above the text
    and highlighted (using ANSI escape sequences manually to avoid unnecessary
    dependency).
    """
    tpl_msg = '\n{msg}\n'
    tpl_title = '\n\033[93m{msg}\033[0m'
    if kwargs.get('title'):
        print(tpl_title.format(msg=_wrap_text(kwargs['title'])))
    paragraphs = [_wrap_text(paragraph) for paragraph in text]
    print(tpl_msg.format(msg='\n\n'.join(paragraphs)))
def _wrap_text(text):
"""
Wrap text at given width using textwrap module. Indent should consist of
spaces. Its length is deducted from wrap width to ensure exact wrapping.
"""
wrap_max = 80
indent = ' '
wrap_width = wrap_max - len(indent)
return textwrap.fill(text, width=wrap_width, initial_indent=indent,
subsequent_indent=indent, break_long_words=False,
break_on_hyphens=False)
def sys_exit(*messages, **kwargs):
    """Print any messages via print_msg(), then exit with status 0.

    For modules used from the command line, like download and link. To print
    a message, use the same arguments as for print_msg().
    """
    if messages:
        print_msg(*messages, **kwargs)
    # sys.exit(0) is exactly this raise under the hood.
    raise SystemExit(0)
| |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Rect
#------------------------------------------------------------------------------
class BaseRect(tuple):
    """ A tuple subclass representing an (x, y, width, height)
    bounding box. Subclasses should override the __new__ method
    to enforce any necessary typing.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Identity coercion; subclasses override to enforce int/float typing.
        return item

    def __new__(cls, x=None, y=None, width=None, height=None):
        # A tuple-like first argument supplies all four components at once.
        if isinstance(x, (tuple, BaseRect)):
            return cls(*x)
        coerce = cls.coerce_type
        x = coerce(x)
        y = x if y is None else coerce(y)
        width = coerce(width)
        height = width if height is None else coerce(height)
        return super(BaseRect, cls).__new__(cls, (x, y, width, height))

    def __getnewargs__(self):
        # Support copying/pickling through tuple reconstruction.
        return tuple(self)

    def __repr__(self):
        name = self.__class__.__name__
        return '%s(x=%s, y=%s, width=%s, height=%s)' % ((name,) + self)

    @property
    def x(self):
        """ The 'x' position component of the rect.
        """
        return self[0]

    @property
    def y(self):
        """ The 'y' position component of the rect.
        """
        return self[1]

    @property
    def width(self):
        """ The 'width' size component of the rect.
        """
        return self[2]

    @property
    def height(self):
        """ The 'height' size component of the rect.
        """
        return self[3]
class Rect(BaseRect):
    """ A BaseRect implementation for integer values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0; everything else becomes int.
        if item is None:
            return 0
        return int(item)

    @property
    def box(self):
        """ The equivalent Box for this rect.
        """
        x, y, w, h = self
        return Box(y, x + w, y + h, x)

    @property
    def pos(self):
        """ The position of the rect as a Pos object.
        """
        return Pos(self.x, self.y)

    @property
    def size(self):
        """ The size of the rect as a Size object.
        """
        return Size(self.width, self.height)
class RectF(BaseRect):
    """ A BaseRect implementation for floating point values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0.0; everything else becomes float.
        if item is None:
            return 0.0
        return float(item)

    @property
    def box(self):
        """ The equivalent Box for this rect.
        """
        x, y, w, h = self
        return BoxF(y, x + w, y + h, x)

    @property
    def pos(self):
        """ The position of the rect as a Pos object.
        """
        return PosF(self.x, self.y)

    @property
    def size(self):
        """ The size of the rect as a Size object.
        """
        return SizeF(self.width, self.height)
#------------------------------------------------------------------------------
# Box
#------------------------------------------------------------------------------
class BaseBox(tuple):
    """ A tuple subclass representing a (top, right, bottom, left) box.
    Subclasses should override the __new__ method to enforce any typing.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Identity coercion; subclasses override to enforce int/float typing.
        return item

    def __new__(cls, top=None, right=None, bottom=None, left=None):
        # A tuple-like first argument supplies all four components at once.
        if isinstance(top, (tuple, BaseBox)):
            return cls(*top)
        coerce = cls.coerce_type
        top = coerce(top)
        right = top if right is None else coerce(right)
        bottom = top if bottom is None else coerce(bottom)
        # CSS-style shorthand: a missing left mirrors the right component.
        left = right if left is None else coerce(left)
        return super(BaseBox, cls).__new__(cls, (top, right, bottom, left))

    def __getnewargs__(self):
        # Support copying/pickling through tuple reconstruction.
        return tuple(self)

    def __repr__(self):
        name = self.__class__.__name__
        return '%s(top=%s, right=%s, bottom=%s, left=%s)' % ((name,) + self)

    @property
    def top(self):
        """ The 'top' component of the box.
        """
        return self[0]

    @property
    def right(self):
        """ The 'right' component of the box.
        """
        return self[1]

    @property
    def bottom(self):
        """ The 'bottom' component of the box.
        """
        return self[2]

    @property
    def left(self):
        """ The 'left' component of the box.
        """
        return self[3]
class Box(BaseBox):
    """ A BaseBox implementation for integer values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0; everything else becomes int.
        if item is None:
            return 0
        return int(item)

    @property
    def rect(self):
        """ The equivalent Rect for this box.
        """
        top, right, bottom, left = self
        return Rect(left, top, right - left, bottom - top)

    @property
    def size(self):
        """ The Size of this box.
        """
        top, right, bottom, left = self
        return Size(right - left, bottom - top)

    @property
    def pos(self):
        """ The Pos of this box.
        """
        return Pos(self.left, self.top)
class BoxF(BaseBox):
    """ A BaseBox implementation for floating point values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0.0; everything else becomes float.
        if item is None:
            return 0.0
        return float(item)

    @property
    def rect(self):
        """ The equivalent Rect for this box.
        """
        top, right, bottom, left = self
        return RectF(left, top, right - left, bottom - top)

    @property
    def size(self):
        """ The Size of this box.
        """
        top, right, bottom, left = self
        return SizeF(right - left, bottom - top)

    @property
    def pos(self):
        """ The Pos of this box.
        """
        return PosF(self.left, self.top)
#------------------------------------------------------------------------------
# Pos
#------------------------------------------------------------------------------
class BasePos(tuple):
    """ A tuple subclass representing a (x, y) positions. Subclasses
    should override the __new__ method to enforce any necessary typing.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Identity coercion; subclasses override to enforce int/float typing.
        return item

    def __new__(cls, x=None, y=None):
        # A tuple-like first argument supplies both components at once.
        if isinstance(x, (tuple, BasePos)):
            return cls(*x)
        coerce = cls.coerce_type
        x = coerce(x)
        y = x if y is None else coerce(y)
        return super(BasePos, cls).__new__(cls, (x, y))

    def __getnewargs__(self):
        # Support copying/pickling through tuple reconstruction.
        return tuple(self)

    def __repr__(self):
        name = self.__class__.__name__
        return '%s(x=%s, y=%s)' % ((name,) + self)

    @property
    def x(self):
        """ The 'x' component of the position.
        """
        return self[0]

    @property
    def y(self):
        """ The 'y' component of the position.
        """
        return self[1]
class Pos(BasePos):
    """ An implementation of BasePos for integer values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0; everything else becomes int.
        if item is None:
            return 0
        return int(item)
class PosF(BasePos):
    """ An implementation of BasePos of floating point values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0.0; everything else becomes float.
        if item is None:
            return 0.0
        return float(item)
#------------------------------------------------------------------------------
# Size
#------------------------------------------------------------------------------
class BaseSize(tuple):
    """ A tuple subclass representing a (width, height) size. Subclasses
    should override the __new__ method to enforce any necessary typing.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Identity coercion; subclasses override to enforce int/float typing.
        return item

    def __new__(cls, width=None, height=None):
        # A tuple-like first argument supplies both components at once.
        if isinstance(width, (tuple, BaseSize)):
            return cls(*width)
        coerce = cls.coerce_type
        width = coerce(width)
        height = width if height is None else coerce(height)
        return super(BaseSize, cls).__new__(cls, (width, height))

    def __getnewargs__(self):
        # Support copying/pickling through tuple reconstruction.
        return tuple(self)

    def __repr__(self):
        name = self.__class__.__name__
        return '%s(width=%s, height=%s)' % ((name,) + self)

    @property
    def width(self):
        """ The 'width' component of the size.
        """
        return self[0]

    @property
    def height(self):
        """ The 'height' component of the size.
        """
        return self[1]
class Size(BaseSize):
    """ A BaseSize implementation for integer values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0; everything else becomes int.
        if item is None:
            return 0
        return int(item)
class SizeF(BaseSize):
    """ A BaseSize implementation for floating point values.
    """
    __slots__ = ()

    @staticmethod
    def coerce_type(item):
        # Missing components default to 0.0; everything else becomes float.
        if item is None:
            return 0.0
        return float(item)
| |
"""Support for (EMEA/EU-based) Honeywell TCC climate systems.
Such systems include evohome (multi-zone), and Round Thermostat (single zone).
"""
from datetime import datetime, timedelta
import logging
import re
from typing import Any, Dict, Optional, Tuple
import aiohttp.client_exceptions
import evohomeasync
import evohomeasync2
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
HTTP_SERVICE_UNAVAILABLE,
HTTP_TOO_MANY_REQUESTS,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from .const import DOMAIN, EVO_FOLLOW, GWS, STORAGE_KEY, STORAGE_VERSION, TCS
_LOGGER = logging.getLogger(__name__)
# Keys under which auth tokens / session data are persisted in HA storage.
ACCESS_TOKEN = "access_token"
ACCESS_TOKEN_EXPIRES = "access_token_expires"
REFRESH_TOKEN = "refresh_token"
USER_DATA = "user_data"
# Index into the account's location list (accounts can have several locations).
CONF_LOCATION_IDX = "location_idx"
# Default/minimum polling intervals; see _handle_exception for the vendor's
# rate-limit (HTTP 429) handling that motivates the lower bound.
SCAN_INTERVAL_DEFAULT = timedelta(seconds=300)
SCAN_INTERVAL_MINIMUM = timedelta(seconds=60)
# YAML configuration schema: credentials are required; location index and scan
# interval are optional (the interval is clamped to the minimum above).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_USERNAME): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
                vol.Optional(CONF_LOCATION_IDX, default=0): cv.positive_int,
                vol.Optional(
                    CONF_SCAN_INTERVAL, default=SCAN_INTERVAL_DEFAULT
                ): vol.All(cv.time_period, vol.Range(min=SCAN_INTERVAL_MINIMUM)),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def _local_dt_to_aware(dt_naive: datetime) -> datetime:
    """Convert a naive local datetime to an aware one, rounded to whole seconds."""
    dt_aware = dt_util.now() + (dt_naive - datetime.now())
    # Round half-up to the nearest second before dropping the microseconds.
    if dt_aware.microsecond >= 500000:
        dt_aware += timedelta(seconds=1)
    return dt_aware.replace(microsecond=0)
def _dt_to_local_naive(dt_aware: datetime) -> datetime:
    """Convert an aware datetime to a naive local one, rounded to whole seconds."""
    dt_naive = datetime.now() + (dt_aware - dt_util.now())
    # Round half-up to the nearest second before dropping the microseconds.
    if dt_naive.microsecond >= 500000:
        dt_naive += timedelta(seconds=1)
    return dt_naive.replace(microsecond=0)
def convert_until(status_dict, until_key) -> None:
    """Convert datetime string from "%Y-%m-%dT%H:%M:%SZ" to local/aware/isoformat.

    Mutates *status_dict* in place and returns None (the previous ``-> str``
    annotation was incorrect - nothing is ever returned).
    """
    if until_key in status_dict:  # only present for certain modes
        dt_utc_naive = dt_util.parse_datetime(status_dict[until_key])
        status_dict[until_key] = dt_util.as_local(dt_utc_naive).isoformat()
def convert_dict(dictionary: Dict[str, Any]) -> Dict[str, Any]:
    """Recursively convert a dict's keys to snake_case."""

    def convert_key(key: str) -> str:
        """Convert a string to snake_case."""
        # Separators (-, ., whitespace) become underscores first.
        string = re.sub(r"[\-\.\s]", "_", str(key))
        first = string[0].lower()
        # Each remaining capital becomes '_' + its lowercase form.
        rest = re.sub(
            r"[A-Z]", lambda matched: "_" + matched.group(0).lower(), string[1:]
        )
        return first + rest

    converted = {}
    for key, value in dictionary.items():
        new_key = convert_key(key) if isinstance(key, str) else key
        converted[new_key] = convert_dict(value) if isinstance(value, dict) else value
    return converted
def _handle_exception(err) -> bool:
    """Log a known/transient vendor-API failure and return False; re-raise others.

    Returns False when the error was recognised (auth failure, connection
    trouble, HTTP 503 or 429) so callers can abort the current operation
    gracefully; any other exception propagates.
    """
    try:
        # Re-raise the caught exception so the clauses below dispatch on type.
        raise err
    except evohomeasync2.AuthenticationError:
        _LOGGER.error(
            "Failed to authenticate with the vendor's server. "
            "Check your network and the vendor's service status page. "
            "Also check that your username and password are correct. "
            "Message is: %s",
            err,
        )
        return False
    except aiohttp.ClientConnectionError:
        # this appears to be common with Honeywell's servers
        _LOGGER.warning(
            "Unable to connect with the vendor's server. "
            "Check your network and the vendor's service status page. "
            "Message is: %s",
            err,
        )
        return False
    except aiohttp.ClientResponseError:
        if err.status == HTTP_SERVICE_UNAVAILABLE:
            _LOGGER.warning(
                "The vendor says their server is currently unavailable. "
                "Check the vendor's service status page."
            )
            return False
        if err.status == HTTP_TOO_MANY_REQUESTS:
            _LOGGER.warning(
                "The vendor's API rate limit has been exceeded. "
                "If this message persists, consider increasing the %s.",
                CONF_SCAN_INTERVAL,
            )
            return False
        raise  # we don't expect/handle any other Exceptions
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
    """Create a (EMEA/EU-based) Honeywell evohome system."""

    async def load_auth_tokens(store) -> Tuple[Dict, Optional[Dict]]:
        # Restore cached auth tokens; discard them if saved for another user.
        app_storage = await store.async_load()
        tokens = dict(app_storage if app_storage else {})
        if tokens.pop(CONF_USERNAME, None) != config[DOMAIN][CONF_USERNAME]:
            # any tokens wont be valid, and store might be be corrupt
            await store.async_save({})
            return ({}, None)
        # evohomeasync2 requires naive/local datetimes as strings
        if tokens.get(ACCESS_TOKEN_EXPIRES) is not None:
            tokens[ACCESS_TOKEN_EXPIRES] = _dt_to_local_naive(
                dt_util.parse_datetime(tokens[ACCESS_TOKEN_EXPIRES])
            )
        user_data = tokens.pop(USER_DATA, None)
        return (tokens, user_data)

    store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    tokens, user_data = await load_auth_tokens(store)

    client_v2 = evohomeasync2.EvohomeClient(
        config[DOMAIN][CONF_USERNAME],
        config[DOMAIN][CONF_PASSWORD],
        **tokens,
        session=async_get_clientsession(hass),
    )

    try:
        await client_v2.login()
    except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
        _handle_exception(err)
        return False
    finally:
        # Don't keep the plaintext password in the config dict after login.
        config[DOMAIN][CONF_PASSWORD] = "REDACTED"

    loc_idx = config[DOMAIN][CONF_LOCATION_IDX]
    try:
        loc_config = client_v2.installation_info[loc_idx][GWS][0][TCS][0]
    except IndexError:
        _LOGGER.error(
            "Config error: '%s' = %s, but the valid range is 0-%s. "
            "Unable to continue. Fix any configuration errors and restart HA.",
            CONF_LOCATION_IDX,
            loc_idx,
            len(client_v2.installation_info) - 1,
        )
        return False
    _LOGGER.debug("Config = %s", loc_config)

    # The v1 client is used only for high-precision temperatures (see EvoBroker).
    client_v1 = evohomeasync.EvohomeClient(
        client_v2.username,
        client_v2.password,
        user_data=user_data,
        session=async_get_clientsession(hass),
    )

    hass.data[DOMAIN] = {}
    hass.data[DOMAIN]["broker"] = broker = EvoBroker(
        hass, client_v2, client_v1, store, config[DOMAIN]
    )
    await broker.save_auth_tokens()
    await broker.update()  # get initial state

    hass.async_create_task(async_load_platform(hass, "climate", DOMAIN, {}, config))
    if broker.tcs.hotwater:
        # Only systems with a DHW controller get a water_heater platform.
        hass.async_create_task(
            async_load_platform(hass, "water_heater", DOMAIN, {}, config)
        )

    # Poll the vendor API on the configured interval.
    hass.helpers.event.async_track_time_interval(
        broker.update, config[DOMAIN][CONF_SCAN_INTERVAL]
    )
    return True
class EvoBroker:
    """Container for evohome client and data.

    Owns both API clients (v2 for modes/setpoints/status, optional v1 for
    high-precision temperatures), the store used to persist auth tokens, and
    the cached state shared by all evohome entities.
    """

    def __init__(self, hass, client, client_v1, store, params) -> None:
        """Initialize the evohome client and its data structure."""
        self.hass = hass
        self.client = client          # v2 API client
        self.client_v1 = client_v1    # v1 API client; may be disabled later
        self._store = store
        self.params = params

        loc_idx = params[CONF_LOCATION_IDX]
        self.config = client.installation_info[loc_idx][GWS][0][TCS][0]
        # Walk the client's private object tree down to the control system.
        self.tcs = (
            client.locations[loc_idx]  # pylint: disable=protected-access
            ._gateways[0]
            ._control_systems[0]
        )
        # None until _update_v1 succeeds; then a dict of id -> temperature.
        self.temps = None

    async def save_auth_tokens(self) -> None:
        """Save access tokens and session IDs to the store for later use."""
        # evohomeasync2 uses naive/local datetimes
        access_token_expires = _local_dt_to_aware(self.client.access_token_expires)
        app_storage = {CONF_USERNAME: self.client.username}
        app_storage[REFRESH_TOKEN] = self.client.refresh_token
        app_storage[ACCESS_TOKEN] = self.client.access_token
        app_storage[ACCESS_TOKEN_EXPIRES] = access_token_expires.isoformat()
        if self.client_v1 and self.client_v1.user_data:
            # Persist only the minimal v1 session data (IDs, not credentials).
            app_storage[USER_DATA] = {
                "userInfo": {"userID": self.client_v1.user_data["userInfo"]["userID"]},
                "sessionId": self.client_v1.user_data["sessionId"],
            }
        else:
            app_storage[USER_DATA] = None
        await self._store.async_save(app_storage)

    async def _update_v1(self, *args, **kwargs) -> None:
        """Get the latest high-precision temperatures of the default Location."""

        def get_session_id(client_v1) -> Optional[str]:
            # Current v1 session id, used to detect a session change below.
            user_data = client_v1.user_data if client_v1 else None
            return user_data.get("sessionId") if user_data else None

        session_id = get_session_id(self.client_v1)
        try:
            temps = list(await self.client_v1.temperatures(force_refresh=True))
        except aiohttp.ClientError as err:
            _LOGGER.warning(
                "Unable to obtain the latest high-precision temperatures. "
                "Check your network and the vendor's service status page. "
                "Proceeding with low-precision temperatures. "
                "Message is: %s",
                err,
            )
            self.temps = None  # these are now stale, will fall back to v2 temps
        else:
            if (
                str(self.client_v1.location_id)
                != self.client.locations[self.params[CONF_LOCATION_IDX]].locationId
            ):
                _LOGGER.warning(
                    "The v2 API's configured location doesn't match "
                    "the v1 API's default location (there is more than one location), "
                    "so the high-precision feature will be disabled"
                )
                # Permanently disable v1 polling for this session.
                self.client_v1 = self.temps = None
            else:
                self.temps = {str(i["id"]): i["temp"] for i in temps}
        _LOGGER.debug("Temperatures = %s", self.temps)
        if session_id != get_session_id(self.client_v1):
            # The v1 session changed (or was dropped) - persist the new state.
            await self.save_auth_tokens()

    async def _update_v2(self, *args, **kwargs) -> None:
        """Get the latest modes, temperatures, setpoints of a Location."""
        access_token = self.client.access_token
        loc_idx = self.params[CONF_LOCATION_IDX]
        try:
            status = await self.client.locations[loc_idx].status()
        except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
            _handle_exception(err)
        else:
            self.hass.helpers.dispatcher.async_dispatcher_send(DOMAIN)
            _LOGGER.debug("Status = %s", status[GWS][0][TCS][0])
        if access_token != self.client.access_token:
            # The client refreshed its token during the request - persist it.
            await self.save_auth_tokens()

    async def update(self, *args, **kwargs) -> None:
        """Get the latest state data of an entire evohome Location.
        This includes state data for a Controller and all its child devices, such as the
        operating mode of the Controller and the current temp of its children (e.g.
        Zones, DHW controller).
        """
        await self._update_v2()
        if self.client_v1:
            await self._update_v1()
        # inform the evohome devices that state data has been updated
        self.hass.helpers.dispatcher.async_dispatcher_send(DOMAIN)
class EvoDevice(Entity):
    """Base for any evohome device.
    This includes the Controller, (up to 12) Heating Zones and (optionally) a
    DHW controller.
    """

    def __init__(self, evo_broker, evo_device) -> None:
        """Initialize the evohome entity."""
        self._evo_device = evo_device
        self._evo_broker = evo_broker
        self._evo_tcs = evo_broker.tcs
        # Subclasses are expected to populate these entity attributes.
        self._unique_id = self._name = self._icon = self._precision = None
        self._supported_features = None
        self._device_state_attrs = {}

    @callback
    def _refresh(self) -> None:
        # Dispatcher target: schedule a forced state refresh for this entity.
        self.async_schedule_update_ha_state(force_refresh=True)

    @property
    def should_poll(self) -> bool:
        """Evohome entities should not be polled."""
        return False

    @property
    def unique_id(self) -> Optional[str]:
        """Return a unique ID."""
        return self._unique_id

    @property
    def name(self) -> str:
        """Return the name of the Evohome entity."""
        return self._name

    @property
    def device_state_attributes(self) -> Dict[str, Any]:
        """Return the Evohome-specific state attributes."""
        status = self._device_state_attrs
        # Convert vendor-format timestamps to local/aware before exposing them.
        if "systemModeStatus" in status:
            convert_until(status["systemModeStatus"], "timeUntil")
        if "setpointStatus" in status:
            convert_until(status["setpointStatus"], "until")
        if "stateStatus" in status:
            convert_until(status["stateStatus"], "until")
        return {"status": convert_dict(status)}

    @property
    def icon(self) -> str:
        """Return the icon to use in the frontend UI."""
        return self._icon

    @property
    def supported_features(self) -> int:
        """Get the flag of supported features of the device."""
        return self._supported_features

    async def async_added_to_hass(self) -> None:
        """Run when entity about to be added to hass."""
        # Listen for the broker's "state updated" dispatcher signal.
        self.hass.helpers.dispatcher.async_dispatcher_connect(DOMAIN, self._refresh)

    @property
    def precision(self) -> float:
        """Return the temperature precision to use in the frontend UI."""
        return self._precision

    @property
    def temperature_unit(self) -> str:
        """Return the temperature unit to use in the frontend UI."""
        return TEMP_CELSIUS

    async def _call_client_api(self, api_function, refresh=True) -> Any:
        """Await a client API coroutine, swallowing known/handled API errors."""
        try:
            result = await api_function
        except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
            if not _handle_exception(err):
                return
        if refresh is True:
            # NOTE(review): update() is *called* here, so its coroutine object
            # (not the bound method) is handed to async_call_later - confirm
            # this is intended; passing self._evo_broker.update would be the
            # conventional callable form.
            self.hass.helpers.event.async_call_later(1, self._evo_broker.update())
        return result
class EvoChild(EvoDevice):
    """Base for any evohome child.
    This includes (up to 12) Heating Zones and (optionally) a DHW controller.
    """

    def __init__(self, evo_broker, evo_device) -> None:
        """Initialize a evohome Controller (hub)."""
        super().__init__(evo_broker, evo_device)
        self._schedule = {}    # raw vendor schedule, see _update_schedule()
        self._setpoints = {}   # derived current/next setpoints, see setpoints

    @property
    def current_temperature(self) -> Optional[float]:
        """Return the current temperature of a Zone."""
        if not self._evo_device.temperatureStatus["isAvailable"]:
            return None
        # Prefer the v1 API's high-precision temperatures when available.
        if self._evo_broker.temps:
            return self._evo_broker.temps[self._evo_device.zoneId]
        return self._evo_device.temperatureStatus["temperature"]

    @property
    def setpoints(self) -> Dict[str, Any]:
        """Return the current/next setpoints from the schedule.
        Only Zones & DHW controllers (but not the TCS) can have schedules.
        """
        if not self._schedule["DailySchedules"]:
            return {}  # no schedule {'DailySchedules': []}, so no scheduled setpoints
        day_time = dt_util.now()
        day_of_week = int(day_time.strftime("%w"))  # 0 is Sunday
        time_of_day = day_time.strftime("%H:%M:%S")
        try:
            # Iterate today's switchpoints until past the current time of day...
            day = self._schedule["DailySchedules"][day_of_week]
            sp_idx = -1  # last switchpoint of the day before
            for i, tmp in enumerate(day["Switchpoints"]):
                if time_of_day > tmp["TimeOfDay"]:
                    sp_idx = i  # current setpoint
                else:
                    break
            # Did the current SP start yesterday? Does the next start SP tomorrow?
            this_sp_day = -1 if sp_idx == -1 else 0
            next_sp_day = 1 if sp_idx + 1 == len(day["Switchpoints"]) else 0
            for key, offset, idx in [
                ("this", this_sp_day, sp_idx),
                ("next", next_sp_day, (sp_idx + 1) * (1 - next_sp_day)),
            ]:
                # Offset lets "this" fall on yesterday and "next" on tomorrow.
                sp_date = (day_time + timedelta(days=offset)).strftime("%Y-%m-%d")
                day = self._schedule["DailySchedules"][(day_of_week + offset) % 7]
                switchpoint = day["Switchpoints"][idx]
                dt_local_aware = _local_dt_to_aware(
                    dt_util.parse_datetime(f"{sp_date}T{switchpoint['TimeOfDay']}")
                )
                self._setpoints[f"{key}_sp_from"] = dt_local_aware.isoformat()
                try:
                    self._setpoints[f"{key}_sp_temp"] = switchpoint["heatSetpoint"]
                except KeyError:
                    # DHW switchpoints carry an on/off state, not a temperature.
                    self._setpoints[f"{key}_sp_state"] = switchpoint["DhwState"]
        except IndexError:
            self._setpoints = {}
            _LOGGER.warning(
                "Failed to get setpoints - please report as an issue", exc_info=True
            )
        return self._setpoints

    async def _update_schedule(self) -> None:
        """Get the latest schedule."""
        if "DailySchedules" in self._schedule and not self._schedule["DailySchedules"]:
            if not self._evo_device.setpointStatus["setpointMode"] == EVO_FOLLOW:
                return  # avoid unnecessary I/O - there's nothing to update
        self._schedule = await self._call_client_api(
            self._evo_device.schedule(), refresh=False
        )
        _LOGGER.debug("Schedule['%s'] = %s", self.name, self._schedule)

    async def async_update(self) -> None:
        """Get the latest state data."""
        # Refresh the schedule once the cached "next setpoint" time has passed.
        next_sp_from = self._setpoints.get("next_sp_from", "2000-01-01T00:00:00+00:00")
        if dt_util.now() >= dt_util.parse_datetime(next_sp_from):
            await self._update_schedule()  # no schedule, or it's out-of-date
        self._device_state_attrs = {"setpoints": self.setpoints}
| |
#! /usr/bin/env python
from __future__ import division
from __future__ import print_function
def main():
import argparse
import pysam
import vcf
from pyfasta import Fasta
import os
import tempfile
import re
import pandas
from collections import OrderedDict
from fisher import pvalue
import sys
import gzip
import csv
from IGV import IGV
from multiprocessing import Process, Queue
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
def can_create_file(folder_path):
try:
tempfile.TemporaryFile(dir=folder_path)
return True
except OSError:
return False
def findCpGs(fafile, chrom, pos, distance):
minpos = 0 if pos<distance else pos-distance
sequence = fafile[chrom][minpos:pos+distance]
CpGs = [m.start() for m in re.finditer('CG', sequence, flags=re.IGNORECASE)]
return [x+minpos for x in CpGs]
def type_of_read(read):
# Work out where the methylation information is in the CpG site, and whether to complement it
# Depends on read1/read2 and forward/reverse status
if read.is_read1 and not read.is_reverse: # First, forward
offset = 0
to_complement = False
elif not read.is_read1 and read.is_reverse: # Second, reverse
offset = 0
to_complement = False
elif read.is_read1 and read.is_reverse: # First, reverse
offset = 1
to_complement = True
elif not read.is_read1 and not read.is_reverse: # Second, forward
offset = 1
to_complement = True
return offset, to_complement
def IGV_reader(queue):
## Read from the queue
while True:
msg = queue.get() # Read from the queue and do nothing
if (msg == 'DONE'):
break
chrom, pos, ref, alt, ref_filename, alt_filename = msg.split(",")
pos = int(pos)
png_filename = os.path.basename("%s.%s.%s.%s.%s.png" % (args.prefix, chrom, pos, ref, alt))
igv.load("file://"+os.path.abspath(ref_filename))
igv.load("file://"+os.path.abspath(alt_filename))
igv.go("%s:%s-%s" % (chrom, pos-250, pos+250))
igv.send("collapse")
igv.send("region %s %s %s" % (chrom, pos+1, pos+2)) # 1 based co-ordinates for IGV
igv.save(png_filename)
igv.clear()
def exportBAMs(chrom, pos, ref, alt, minpos, maxpos, ref_readnames, alt_readnames):
ref_filename = "%s.%s.%s.ref.%s.bam" % (args.prefix, chrom, pos, ref)
ref_bam = pysam.AlignmentFile(ref_filename, "wb", template=samfile)
alt_filename = "%s.%s.%s.alt.%s.bam" % (args.prefix, chrom, pos, alt)
alt_bam = pysam.AlignmentFile(alt_filename, "wb", template=samfile)
for read in samfile.fetch(chrom, minpos, maxpos):
if read.query_name in ref_readnames:
ref_bam.write(read)
elif read.query_name in alt_readnames:
alt_bam.write(read)
ref_bam.close()
alt_bam.close()
pysam.index(ref_filename)
pysam.index(alt_filename)
if args.IGV_screenshot:
IGV_queue.put("%s,%s,%s,%s,%s,%s" % (chrom, pos, ref, alt, ref_filename, alt_filename))
def processReadsAtPosition(chrom, pos, ref, alt, CpGs, ref_readnames, alt_readnames, min_coverage,
        min_region_CpGs):
    """Count methylation (C/T) per allele at each CpG site linked to a SNP.

    PASS 2 of the analysis: walks every read overlapping the CpG sites in
    *CpGs*, assigns each read to the REF or ALT allele via the read-name
    sets, and tallies base calls per CpG site. Sites passing *min_coverage*
    on both alleles are written to ``out_sites``; when at least
    *min_region_CpGs* sites survive, a pooled regional row is written to
    ``out_regions`` and BAMs/IGV screenshots are optionally exported.
    """
    # One row per candidate CpG site; per-allele base counts start at zero.
    CpGs_bases = pandas.DataFrame(OrderedDict([
        ('SNP.chr', chrom),
        ('SNP.pos', pos),
        ('SNP.ref', ref),
        ('SNP.alt', alt),
        ('CpG.pos', CpGs),
        ('ref.A', 0),
        ('ref.C', 0),
        ('ref.G', 0),
        ('ref.T', 0),
        ('ref.N', 0),
        ('alt.A', 0),
        ('alt.C', 0),
        ('alt.G', 0),
        ('alt.T', 0),
        ('alt.N', 0)]))
    for read in samfile.fetch(chrom, CpGs[0]-1, CpGs[-1]+1):  # extra 1bp buffer
        # Is this a REF, ALT or neither read?
        if read.query_name in ref_readnames:
            read_type = 'ref.'
        elif read.query_name in alt_readnames:
            read_type = 'alt.'
        else:
            read_type = None
        if read_type is not None:
            offset, to_complement = type_of_read(read)
            # Iterate through all aligned read positions, and store methylation calls
            for pair in read.get_aligned_pairs():
                if pair[0] is not None and pair[1] is not None:
                    try:
                        i = CpGs.index(pair[1]-offset)
                        this_base = read.query_sequence[pair[0]]
                        if to_complement:
                            this_base = complement[this_base]
                        # .loc replaces the removed DataFrame.ix API; i is a
                        # position in the default RangeIndex, so .loc addresses
                        # the same row .ix did.
                        CpGs_bases.loc[i, read_type+this_base] += 1
                    except ValueError:
                        pass  # aligned position is not one of the CpG sites
    # Calculate coverage per CpG site and subset to rows with minimum coverage
    CpGs_bases["ref.cov"] = [CpGs_bases.loc[i, ["ref.C", "ref.T"]].sum() for i in CpGs_bases.index]
    CpGs_bases["alt.cov"] = [CpGs_bases.loc[i, ["alt.C", "alt.T"]].sum() for i in CpGs_bases.index]
    CpGs_bases = CpGs_bases[CpGs_bases["ref.cov"] >= min_coverage]
    CpGs_bases = CpGs_bases[CpGs_bases["alt.cov"] >= min_coverage]
    if len(CpGs_bases.index) > 0:  # If rows are left
        # float() guards against integer division under Python 2
        CpGs_bases["ref.meth"] = [float(CpGs_bases["ref.C"][i]) / CpGs_bases["ref.cov"][i] for i in CpGs_bases.index]
        CpGs_bases["alt.meth"] = [float(CpGs_bases["alt.C"][i]) / CpGs_bases["alt.cov"][i] for i in CpGs_bases.index]
        CpGs_bases["meth.diff"] = [CpGs_bases["ref.meth"][i] - CpGs_bases["alt.meth"][i] for i in CpGs_bases.index]
        # Fisher exact p-value per CpG site (C vs T counts, REF vs ALT)
        CpGs_bases["pvalue"] = [pvalue(*CpGs_bases.loc[i, ["ref.C", "ref.T", "alt.C", "alt.T"]].tolist()).two_tail for i in CpGs_bases.index]
        # Export sites table
        CpGs_bases.to_csv(out_sites, header=False, index=False)
        if len(CpGs_bases.index) >= min_region_CpGs:  # enough CpG sites for a regional test
            output = "%s,%s,%s,%s,%s,%s,%s," % (
                chrom, pos, ref, alt,  # SNP position
                CpGs_bases["CpG.pos"].tolist()[0], CpGs_bases["CpG.pos"].tolist()[-1],  # First and last CpG sites of region
                len(CpGs_bases.index))  # Number of CpG sites in region
        # Sums of counts across the region
            CpGs_sums = CpGs_bases[["ref.C", "ref.T", "alt.C", "alt.T", "ref.cov", "alt.cov"]].sum(0).tolist()
            output += "%s,%s,%s,%s,%s,%s," % tuple(CpGs_sums)
            # Methylation ratios and p-value for the pooled region
            ref_meth = float(CpGs_sums[0]) / CpGs_sums[4]  # ref.C / ref.cov
            # BUG FIX: ALT methylation is alt.C / alt.cov (indices 2 and 5);
            # the original used CpGs_sums[1], which is the ref.T count.
            alt_meth = float(CpGs_sums[2]) / CpGs_sums[5]
            meth_diff = ref_meth - alt_meth
            p_value = pvalue(*CpGs_sums[0:4]).two_tail
            output += "%s,%s,%s,%s\n" % (ref_meth, alt_meth, meth_diff, p_value)
            # Export row for this region
            out_regions.write(output)
            # Export BAM per allele if feature is turned on and region meets fisher_cutoff
            if args.region_bams and p_value <= args.fisher_cutoff:
                print(" - Regional fisher exact p_value: %s - exporting BAMs" % p_value)
                exportBAMs(chrom, pos, ref, alt, CpGs[0]-1, CpGs[-1]+1,
                           ref_readnames, alt_readnames)
def processCpG(chrom, pos, cutoff_mapq, cutoff_baseq):
    """
    Find readnames of all reads that are meth or unmeth at the specified CpG position.

    Returns (M_readnames, U_readnames): sets of query names whose CpG base
    is C (methylated) or T (unmethylated) after strand resolution, filtered
    by mapping quality and base quality; reads seen in both sets are dropped.
    """
    M_readnames = set()
    U_readnames = set()
    n_mapq = 0
    n_baseq = 0
    for pileup in samfile.pileup(chrom, pos, pos+1):
        if pileup.reference_pos == pos:  # filter for position of interest
            print("Processing %s reads covering CpG position %s:%s" % (
                len(pileup.pileups), chrom, pos))
            for read in pileup.pileups:
                # read mapping quality filter
                if read.alignment.mapping_quality >= cutoff_mapq:
                    n_mapq += 1
                    # BUG FIX: query_position is None for deletions and
                    # reference skips; indexing with it raised TypeError.
                    if read.query_position is None:
                        continue
                    offset, to_complement = type_of_read(read.alignment)
                    if read.query_position + offset < len(read.alignment.query_sequence):
                        CpG_base = read.alignment.query_sequence[read.query_position + offset]
                        if to_complement:
                            CpG_base = complement[CpG_base]
                        CpG_qual = read.alignment.query_qualities[read.query_position + offset]
                        # base quality score filter @ CpG position
                        if CpG_qual >= cutoff_baseq:
                            n_baseq += 1
                            if CpG_base == "C":
                                M_readnames.add(read.alignment.query_name)
                            elif CpG_base == "T":
                                U_readnames.add(read.alignment.query_name)
    print(" - Found %s reads passing mapping quality filter of %s" % (n_mapq, cutoff_mapq))
    print(" - Found %s reads passing base quality filter of %s" % (n_baseq, cutoff_baseq))
    print(" - Found %s reads with M allele" % len(M_readnames))
    print(" - Found %s reads with U allele" % len(U_readnames))
    # Remove reads claiming both alleles - they are ambiguous
    M_and_U = M_readnames.intersection(U_readnames)
    if len(M_and_U) > 0:
        print(" - %s reads discarded for being ambiguous" % len(M_and_U))
        M_readnames = M_readnames.difference(M_and_U)
        U_readnames = U_readnames.difference(M_and_U)
    return M_readnames, U_readnames
def processSNP(chrom, pos, ref, alt, cutoff_mapq, cutoff_baseq):
    """
    Find readnames of all reads with REF and ALT alleles.

    Returns (ref_readnames, alt_readnames): sets of query names whose base
    at *pos* matches *ref* or *alt*, filtered by mapping quality and base
    quality; reads appearing in both sets are discarded as ambiguous.
    """
    ref_readnames = set()
    alt_readnames = set()
    n_mapq = 0
    n_baseq = 0
    for pileup in samfile.pileup(chrom, pos, pos+1):
        if pileup.reference_pos == pos:  # filter for position of interest
            print("Processing %s reads covering SNP position %s:%s" % (
                len(pileup.pileups), chrom, pos))
            for read in pileup.pileups:
                # read mapping quality filter
                if read.alignment.mapping_quality >= cutoff_mapq:
                    n_mapq += 1
                    # BUG FIX: query_position is None for deletions and
                    # reference skips; indexing with it raised TypeError.
                    if read.query_position is None:
                        continue
                    SNP_base = read.alignment.query_sequence[read.query_position]
                    SNP_qual = read.alignment.query_qualities[read.query_position]
                    # base quality score filter @ SNP position
                    if SNP_qual >= cutoff_baseq:
                        n_baseq += 1
                        if SNP_base == ref:
                            ref_readnames.add(read.alignment.query_name)
                        elif SNP_base == alt:
                            alt_readnames.add(read.alignment.query_name)
    print(" - Found %s reads passing mapping quality filter of %s" % (n_mapq, cutoff_mapq))
    print(" - Found %s reads passing base quality filter of %s" % (n_baseq, cutoff_baseq))
    print(" - Found %s reads matching '%s' REF allele" % (len(ref_readnames), ref))
    print(" - Found %s reads matching '%s' ALT allele" % (len(alt_readnames), alt))
    # Remove reads claiming both alleles - they are ambiguous
    ref_and_alt = ref_readnames.intersection(alt_readnames)
    if len(ref_and_alt) > 0:
        print(" - %s reads discarded for being ambiguous" % len(ref_and_alt))
        ref_readnames = ref_readnames.difference(ref_and_alt)
        alt_readnames = alt_readnames.difference(ref_and_alt)
    return ref_readnames, alt_readnames
## Entry point
parser = argparse.ArgumentParser(description="snappymeth.py - "
"Discover sites and regions of allele specific methylation from whole genome bisulfite "
"sequencing data by counting CpG methylation on alleles separately. Reads can be "
"separated by either a heterozygous SNP (when a VCF is supplied), or by the methylation "
"status of a single CpG site. Both analyses modes require sufficient sequencing coverage "
"of both alleles (default is 10x).")
parser.add_argument("input_file", help="Input VCF/CpG sites file, gzip compressed." )
parser.add_argument("input_bam", help="Input BAM file")
parser.add_argument("reference", help="Reference FASTA file")
parser.add_argument("prefix", help="Prefix for all output files - the sites and regions output csvs, "
"regional BAMs and IGV screenshots")
parser.add_argument("--input_type", choices=("VCF", "CpGs"), default="VCF", help="Whether the "
"input_file is a VCF (default) or a csv of methylation counts at CpG sites with the format "
"'chr,position,M,U' where the fields are chromosome name, 0-based position of the CpG site, "
"count of methylated bases sequenced at this site and count of unmethylated bases sequenced.")
parser.add_argument("--VCF_sample", default="0", help="The sample in the VCF to be processed - "
"either as the sample name or numeric index (0-based). Default is 0, the first sample.")
parser.add_argument("--pair_distance", type=int, default=500, help="The distance in "
"basepairs to search up and downstream from each position (default is 500).")
parser.add_argument("--max_depth", type=int, default=100, help="Maximum number "
"of reads allowed at a position to try and filter out repeat reads (default is 100)..")
parser.add_argument("--min_per_allele", type=int, default=5, help="Minimum number "
"of reads containing each allele to process a position.")
parser.add_argument("--min_sites_in_region", type=int, default=3, help="Minimum number "
"of CpG sites linked to a SNP to perform a regional analysis.")
parser.add_argument("--min_mapping_quality", type=int, default=40, help="Minimum mapping "
"quality score for a read to be considered.")
parser.add_argument("--min_base_quality", type=int, default=30, help="Minimum basecall "
"quality score at the SNP for a read to be considered.")
parser.add_argument("--region_bams", default=False, action='store_true', help="Specity to output "
"BAM files per allele when the regional fisher exact p-value is less than the cutoff "
"specified by --fisher_cutoff.")
parser.add_argument("--fisher_cutoff", type=float, default=0.0001, help="Regional fisher exact "
"p-value cutoff for a regional BAM to be created/IGV screenshot be taken (default is 0.0001).")
parser.add_argument("--IGV_screenshot", default=False, action='store_true', help="Specity to take "
"IGV screenshots of each region that passes --fisher_cutoff. Requires that IGV be running on "
"the local machine and listening on port 60151")
args = parser.parse_args()
# Check that the input files exist, and that the output folder is writeable
if not os.path.isfile(args.input_file):
print("Input file %s does not exist!" % args.input_file)
return
if not os.path.isfile(args.input_bam):
print("Input BAM file %s does not exist!" % args.input_bam)
return
if not os.path.isfile(args.reference):
print("Reference FASTA file %s does not exist!" % args.reference)
return
if not can_create_file(os.path.dirname(args.prefix)):
print("Output directory %s/ is not writable!" % os.path.dirname(args.prefix))
return
# Setup for IGV
if args.IGV_screenshot:
args.region_bams = True
igv = IGV()
igv.clear()
print("BAMs and IGV screenshots will be saved in %s" % os.path.dirname(os.path.abspath(args.prefix)))
igv.set_path(os.path.dirname(os.path.abspath(args.prefix)))
# Setup queue for IGV screenshots in separate process
print("Starting separate process for IGV screenshots")
IGV_queue = Queue()
reader_process = Process(target=IGV_reader, args=((IGV_queue),))
reader_process.daemon = True
reader_process.start() # Launch IGV_reader() as a separate python process
# Open the reference fasta file
print("Loading %s" % args.reference)
fafile = Fasta(args.reference)
# Index samfile if one does not already exist
samfile = pysam.AlignmentFile(args.input_bam, "rb")
if not samfile._hasIndex():
print("BAM file '%s' does not have an index, creating one..." % args.input_bam)
samfile.close()
pysam.index(args.input_bam)
samfile = pysam.AlignmentFile(args.input_bam, "rb")
# Open the output files and write headers
out_sites = open(args.prefix + ".sites.csv", "w")
out_sites.write("SNP.chr,SNP.pos,SNP.ref,SNP.alt,CpG.pos,ref.A,ref.C,ref.G,ref.T,ref.N,"
"alt.A,alt.C,alt.G,alt.T,alt.N,ref.cov,alt.cov,ref.meth,alt.meth,meth.diff,p.value\n")
out_regions = open(args.prefix + ".regions.csv", "w")
out_regions.write("SNP.chr,SNP.pos,SNP.ref,SNP.alt,first.CpG,last.CpG,nCG,ref.C,ref.T,alt.C,alt.T,"
"ref.cov,alt.cov,ref.meth,alt.meth,meth.diff,p.val\n")
if args.input_type=="VCF": # VCF analysis
# Open the VCF file
vcffile = vcf.Reader(filename=args.input_file, compressed=True)
# Check VCF_sample validity
if args.VCF_sample.isdigit(): # If a number convert to int
args.VCF_sample = int(args.VCF_sample)
if isinstance(args.VCF_sample, basestring):
try:
sample_no = vcffile.samples.index(args.VCF_sample)
except ValueError:
sys.exit("Sample %s not found in VCF!" % args.VCF_sample)
elif not args.VCF_sample < len(vcffile.samples):
sys.exit("Sample number %s not found in VCF!" % args.VCF_sample)
else:
sample_no = args.VCF_sample
print("Processing sample no %s (%s) from VCF" % (sample_no, vcffile.samples[sample_no]))
# Iterate through the VCF
for record in vcffile:
call = record.samples[sample_no]
if call.is_het:
n_ref = call['DP4'][0] + call['DP4'][1]
n_alt = call['DP4'][2] + call['DP4'][3]
if n_ref >= args.min_per_allele and n_alt >= args.min_per_allele and (n_ref + n_alt) <= args.max_depth:
# record.POS-1 as VCFs are 1 based and everything is 0 based
CpGs = findCpGs(fafile, record.CHROM, record.POS-1, args.pair_distance)
# If SNP overlaps a CpG site, remove
for site in range(record.POS-2, record.POS+1):
if site in CpGs:
CpGs.remove(site)
if len(CpGs) > 0: # If there are any CpG sites in the vicinity
ref_reads, alt_reads = processSNP(record.CHROM, record.POS-1, record.REF,
record.ALT[0].sequence, args.min_mapping_quality, args.min_base_quality)
if len(ref_reads) + len(alt_reads) <= args.max_depth:
processReadsAtPosition(record.CHROM, record.POS-1, record.REF,
record.ALT[0].sequence, CpGs, ref_reads, alt_reads, args.min_per_allele,
args.min_sites_in_region)
else: ## CpG sites analysis
with gzip.open(args.input_file, "r") as f:
CpGreader = csv.DictReader(f)
if CpGreader.fieldnames != ['chr', 'position', 'M', 'U']:
sys.exit("Field names in %s must be 'chr,position,M,U'" % args.input_file)
for CpG in CpGreader:
if int(CpG["M"]) >= args.min_per_allele and int(CpG["U"]) >= args.min_per_allele and (int(CpG["M"]) + int(CpG["U"])) <= args.max_depth:
CpGs = findCpGs(fafile, CpG["chr"], int(CpG["position"]), args.pair_distance)
try:
CpGs.remove(int(CpG["position"])) # Remove the CpG site we are processing
except ValueError:
sys.exit("Input file CpG site at '%s:%s' is a '%s' in reference. Are you sure your input file coordinates are 0-based?" % (CpG["chr"], CpG["position"], fafile[CpG["chr"]][int(CpG["position"]):int(CpG["position"])+2]))
if len(CpGs) > 0: # If there are any other CpG sites in the vicinity
M_reads, U_reads = processCpG(CpG["chr"], int(CpG["position"]),
args.min_mapping_quality, args.min_base_quality)
if len(M_reads) + len(U_reads) <= args.max_depth:
processReadsAtPosition(CpG["chr"], int(CpG["position"]), "M", "U", CpGs,
M_reads, U_reads, args.min_per_allele, args.min_sites_in_region)
# Close down IGV process
if args.IGV_screenshot:
IGV_queue.put("DONE")
print("Waiting for IGV screenshots process to finish")
reader_process.join()
# close files
samfile.close()
out_sites.close()
out_regions.close()
# Script entry point: run the full analysis only when executed directly.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
#Main file
import unsplash
import dril
import keys
from html import unescape
#from re import findall, compile
import logging
import tweepy
from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageStat
from random import randrange
#Function to format quotes
def format_quote(quote):
    """Split *quote* into short display segments for rendering on an image.

    Tokens are HTML-unescaped and grouped: quoted/bracketed runs and
    hashtags become their own segments, sentence punctuation closes the
    current segment, and any segment growing past 70 characters is split
    near its midpoint. Returns the list of segments with surrounding
    spaces stripped.
    """
    important = ['"', '*', '(', ')', '[', ']']
    parts = []
    buf = ''
    for raw in quote.split():
        token = unescape(raw)
        if any(ch in token for ch in ('"', '*', '(', ')')):
            # How many quote/bracket marks does this token carry?
            marks = sum(1 for ch in token if ch in important)
            if any(ch in buf for ch in ('"', '*', '(')):
                # Already inside a quoted run: this token closes it.
                buf += token
                parts.append(buf)
                buf = ''
            elif marks > 1:
                # Token is a self-contained quoted run.
                parts.append(buf)
                parts.append(token)
                buf = ''
            else:
                # Token opens a quoted run: flush what came before it.
                if len(buf) > 0:
                    parts.append(buf)
                    buf = ''
                buf += token + ' '
        elif '#' in token:
            # Hashtags always stand alone.
            parts.append(buf)
            parts.append(token)
            buf = ''
        elif any(ch in token for ch in ('.', '!', ',', '?', '\n', '\r')):
            # Sentence punctuation ends the current segment.
            if len(token) <= 1:
                buf += ' ' + token
            else:
                buf += token
            parts.append(buf)
            buf = ''
        else:
            buf += token + ' '
        # Break up segments that are getting too long
        if len(buf) > 70:
            cut = buf.find(' ', round(len(buf) / 2))
            parts.append(buf[0:cut])
            buf = buf[cut:]
    # Add whatever's left
    if len(buf) > 0:
        parts.append(buf)
    # Strip surrounding whitespace from every segment
    return [s.strip(' ') for s in parts]
#Calculate the average brightness of the image and reduce if it's not dark enough to see white text
def adjust_brightness(image):
    """Darken *image* when its mean luminance would wash out white text.

    The brightness factor scales inversely with the measured luminance
    (1.10 - mean), so brighter photos are darkened more aggressively.
    Images at or below 10% mean luminance are returned unchanged.
    """
    grayscale = image.convert('L')
    mean_luma = ImageStat.Stat(grayscale).mean[0] / 255
    if mean_luma > 0.10:
        image = ImageEnhance.Brightness(image).enhance(1.10 - mean_luma)
    return image
#Now take the segments and organize them to be drawn on the image
def beautify_quote(segments):
    """Assign a font name and point size to every text segment.

    A decorative/plain font pairing is chosen at random. Hashtags and
    quoted segments get the decorative font at size 50; if no segment is
    decorated at all, short segments (<= 20 chars) get the decorative
    font instead. Everything else uses the plain font at size 20.
    Returns a list of {'font', 'size', 'text'} dicts in input order.
    """
    # Font pairings: [decorative, plain]. More were tried but had issues.
    font_pairs = [
        ['Debby', 'DroidSerif-Italic'],
        ['Debby', 'DroidSerif-Regular'],
        ['Lovelo Line Light', 'sf-new-republic.sc'],
    ]
    fancy, regular = font_pairs[randrange(len(font_pairs))]
    # Will any segment already be rendered decoratively?
    has_fancy_segment = any('#' in s or '"' in s for s in segments)
    organized = []
    for segment in segments:
        if '#' in segment or '"' in segment:
            font, size = fancy, 50
        elif not has_fancy_segment and len(segment) <= 20:
            font, size = fancy, 50
        else:
            font, size = regular, 20
        organized.append({'font': font, 'size': size, 'text': segment})
    return organized
def create_image(quote, path):
    """Fetch a random Unsplash photo, overlay *quote* on it, save to *path*.

    The photo is downloaded to *path*, resized to 800px wide (preserving
    aspect ratio), darkened if needed, and the quote segments are drawn
    centred horizontally and vertically before the result overwrites *path*.
    """
    # Pick a new random image and load it
    unsplash.getImage(path)
    image = Image.open(path)
    # Resize image to have 800px width and adjust brightness
    p = 800 / image.width
    image = image.resize([int(image.width * p), int(image.height * p)])
    image = adjust_brightness(image)
    draw = ImageDraw.Draw(image)
    # Break quote into segments so they look nice when they're printed
    segments = format_quote(quote)
    imageText = beautify_quote(segments)
    # First pass: total text height so the block can be vertically centred.
    # NOTE(review): font.getsize was removed in Pillow 10 - switch to
    # font.getbbox / draw.textbbox if Pillow is ever upgraded.
    tempHeight = 0
    for segment in imageText:
        font = ImageFont.truetype(font='fonts/' + segment['font'] + '.ttf', size=segment['size'])
        height = font.getsize(segment['text'])[1]
        tempHeight += height
    baseHeight = (image.height - tempHeight) / 2
    # Second pass: draw each segment horizontally centred
    for segment in imageText:
        font = ImageFont.truetype(font='fonts/' + segment['font'] + '.ttf', size=segment['size'])
        width, height = font.getsize(segment['text'])
        draw.text(((image.width - width) / 2, baseHeight), segment['text'], font=font)
        baseHeight += height + 2
    # BUG FIX: save to the supplied *path* instead of the hard-coded
    # 'data/dril.png' (identical for the current caller, which passes that
    # path, but the parameter was otherwise ignored).
    image.save(path, "PNG")
if __name__ == "__main__":
    # Log to file with timestamps so cron runs can be audited.
    logging.basicConfig(filename='data/info.log', level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    logging.info('Began running script...')
    # Build the quote file and get a random quote
    d = dril.Dril()
    d.build()
    quote = d.quote()[1]
    print('"' + quote + '" - @dril')
    # Create image
    create_image(quote, 'data/dril.png')
    # Authenticate with Twitter
    auth = tweepy.OAuthHandler(keys.keys['consumer_key'], keys.keys['consumer_secret'])
    auth.set_access_token(keys.keys['access_token'], keys.keys['access_token_secret'])
    api = tweepy.API(auth)
    # Update
    try:
        #api.update_with_media('data/dril.png')
        logging.info('Updated successfully')
    except Exception as err:
        # BUG FIX: the original logged str(Exception) - the class itself -
        # so the actual error was never recorded; log the caught instance.
        logging.info('Update failed: ' + str(err))
| |
from blocks.main_loop import MainLoop
from blocks.algorithms import TrainingAlgorithm
from blocks.roles import add_role, AuxiliaryRole
import numpy as np
from blocks.bricks.base import Brick, lazy, application
from blocks.config import config
from blocks.utils import shared_floatx_nans
from blocks.roles import WEIGHT, BIAS
from blocks.initialization import Constant
from theano import tensor as T
from blocks.filter import VariableFilter, get_brick
from blocks.utils import dict_union
from blocks.graph import add_annotation, Annotation
from blocks.filter import get_application_call
import theano
import logging
from collections import OrderedDict
from cuboid.graph import get_parameter_name
from blocks.extensions import FinishAfter, ProgressBar
logger = logging.getLogger(__name__)
class BatchNormPopulationRole(AuxiliaryRole):
    """Auxiliary role marking batch-norm population statistics (u, s, n)."""
    pass

#: Variable for batchnorm populations
BATCHNORM_POPULATION = BatchNormPopulationRole()
class BatchNormalization(Brick):
    """Batch normalization brick.

    Normalizes activations with per-batch statistics and, when
    ``use_population`` is set, with accumulated population statistics
    instead (for inference). Statistics can be gathered by plain summing
    (``accumulate``) or with an exponential moving average
    (``rolling_accumulate``, decay ``alpha``). The learned scale ``g`` and
    shift ``b`` are applied after normalization.
    """

    # Class-level RNG from which per-instance seeds are drawn lazily.
    seed_rng = np.random.RandomState(config.default_seed)

    @lazy(allocation=['input_dim'])
    def __init__(self, input_dim, epsilon=1e-8, use_population=False,
                 rolling_accumulate=False, accumulate=False, alpha=0.99, **kwargs):
        super(BatchNormalization, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.use_population = use_population
        # Small constant added to the variance for numerical stability.
        self.e = epsilon
        self.accumulate = accumulate
        self.rolling_accumulate = rolling_accumulate
        # Moving-average decay used by rolling accumulation.
        self.alpha = alpha

    @property
    def seed(self):
        # Lazily draw a per-instance seed from the shared class RNG.
        if getattr(self, '_seed', None) is not None:
            return self._seed
        else:
            self._seed = self.seed_rng.randint(np.iinfo(np.int32).max)
            return self._seed

    @seed.setter
    def seed(self, value):
        # A seed may only ever be assigned once.
        if hasattr(self, '_seed'):
            raise AttributeError("seed already set")
        self._seed = value

    @property
    def rng(self):
        # Return the explicitly-set RNG, or derive one from the seed.
        if getattr(self, '_rng', None) is not None:
            return self._rng
        else:
            return np.random.RandomState(self.seed)

    @rng.setter
    def rng(self, rng):
        self._rng = rng

    @property
    def naxes(self):
        # Number of axes of the expected input: 2 for flat (FC) input,
        # otherwise one batch axis plus the given feature dims.
        if isinstance(self.input_dim, int):
            return 2
        else:
            return len(self.input_dim) + 1

    def _allocate(self):
        # Pick the normalized feature dimension per input layout:
        # 4 axes -> CNN (channels first), 3 axes -> RNN (features last),
        # 2 axes -> fully-connected.
        naxes = self.naxes
        if naxes == 2:
            dim = self.input_dim
        elif naxes == 4:
            dim = self.input_dim[0]
        elif naxes == 3:
            dim = self.input_dim[-1]
        else:
            raise NotImplementedError
        # Learned scale (g) and shift (b).
        self.g = shared_floatx_nans((dim, ), name='g')
        self.b = shared_floatx_nans((dim, ), name='b')
        add_role(self.g, WEIGHT)
        add_role(self.b, BIAS)
        self.parameters = [self.g, self.b]
        # parameters for inference: accumulated mean (u), accumulated
        # variance (s) and the number of accumulated batches (n).
        self.u = shared_floatx_nans((dim, ), name='u')
        self.s = shared_floatx_nans((dim, ), name='s')
        self.n = shared_floatx_nans((1,), name='n')
        self.add_auxiliary_variable(self.u, roles=[BATCHNORM_POPULATION])
        self.add_auxiliary_variable(self.s, roles=[BATCHNORM_POPULATION])
        self.add_auxiliary_variable(self.n, roles=[BATCHNORM_POPULATION])

    def _initialize(self):
        # Identity transform to start; population statistics start at zero.
        Constant(1).initialize(self.g, self.rng)
        Constant(0).initialize(self.b, self.rng)
        Constant(0).initialize(self.u, self.rng)
        Constant(0).initialize(self.s, self.rng)
        Constant(0).initialize(self.n, self.rng)

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_, **kwargs):
        """Normalize input_, optionally registering statistic updates."""
        output, u, s = self.do_apply(input_, **kwargs)
        # NOTE(review): do_apply's plain signature takes no **kwargs; any
        # extra keywords presumably target the @application wrapper -
        # confirm against the blocks application-call machinery.
        if self.accumulate:
            if self.use_population:
                raise Exception("use_population is set to true as well as with"
                                "accumulation.",
                                "This is not possible as there is nothing to "
                                "take the population of.")
            # NOTE(review): assumes the brick exposes an `updates` mapping
            # (framework-provided); accumulate batch mean/variance sums.
            self.updates[self.u] = self.u + u
            self.updates[self.s] = self.s + s
            self.updates[self.n] = self.n + 1
        if self.rolling_accumulate:
            if self.use_population:
                raise Exception("use_population is set to true as well as with"
                                "rolling_accumulation."
                                "This is not currently supported, "
                                " and might not make sense.")
            # Exponential moving average attached to this application call;
            # n is pinned to 1 so population division is a no-op.
            annotation = get_application_call(output)
            annotation.updates[self.u] = self.u * self.alpha + (1-self.alpha) * u
            annotation.updates[self.s] = self.s * self.alpha + (1-self.alpha) * s
            annotation.updates[self.n] = self.n*0 + 1
        return output

    @application(inputs=['input_'], outputs=['output', 'u', 's'])
    def do_apply(self, input_):
        """Return (normalized output, mean u, variance s) for input_."""
        X = input_
        naxes = self.naxes
        # n is stored as a length-1 shared variable; make it broadcastable
        # so population sums can be divided by it elementwise.
        broadcast_n = T.addbroadcast(self.n, 0)
        if naxes == 4:  # CNN
            # Statistics per channel, over batch and spatial axes.
            if self.use_population:
                u = self.u/broadcast_n
            else:
                u = T.mean(X, axis=[0, 2, 3])
            b_u = u.dimshuffle('x', 0, 'x', 'x')
            if self.use_population:
                s = self.s/broadcast_n
            else:
                s = T.mean(T.sqr(X - b_u), axis=[0, 2, 3])
            X = (X - b_u) / T.sqrt(s.dimshuffle('x', 0, 'x', 'x') + self.e)
            X = self.g.dimshuffle('x', 0, 'x', 'x')*X +\
                self.b.dimshuffle('x', 0, 'x', 'x')
        elif naxes == 3:  # RNN
            # Statistics per feature, over batch and time axes.
            if self.use_population:
                u = self.u/broadcast_n
            else:
                u = T.mean(X, axis=[0, 1])
            b_u = u.dimshuffle('x', 'x', 0)
            if self.use_population:
                s = self.s/broadcast_n
            else:
                s = T.mean(T.sqr(X - b_u), axis=[0, 1])
            X = (X - b_u) / T.sqrt(s.dimshuffle('x', 'x', 0) + self.e)
            X = self.g.dimshuffle('x', 'x', 0)*X +\
                self.b.dimshuffle('x', 'x', 0)
        elif naxes == 2:  # FC
            # Statistics per feature, over the batch axis.
            if self.use_population:
                u = self.u/broadcast_n
            else:
                u = T.mean(X, axis=0)
            if self.use_population:
                s = self.s/broadcast_n
            else:
                s = T.mean(T.sqr(X - u), axis=0)
            X = (X - u) / T.sqrt(s + self.e)
            X = self.g*X + self.b
        else:
            raise NotImplementedError
        return X, u, s

    def get_dim(self, name):
        # Input and output share the configured dimensionality.
        if name == "input_" or name == "output":
            return self.input_dim
        else:
            return super(BatchNormalization, self).get_dim(name)
class BatchNormAccumulate(TrainingAlgorithm):
    """TrainingAlgorithm that accumulates batchnorm population parameters.

    Instead of updating model weights, each processed batch runs the
    accumulation updates registered by `BatchNormalization` bricks, summing
    batch means/variances into the bricks' u/s/n shared variables.
    """

    def __init__(self, cg):
        # cg: computation graph / model whose BATCHNORM_POPULATION
        # auxiliary variables should be accumulated.
        self.cg = cg
        self.parameters = get_batchnorm_parameters(cg)
        self.inputs = cg.inputs
        self._input_names = [i.name for i in self.inputs]

    def initialize(self, **kwargs):
        """Validate brick updates against the parameters and compile."""
        logger.info("BatchNormAccumulate initializing")
        # get list of distinct bricks owning the population parameters
        bricks_seen = set()
        for p in self.parameters:
            brick = get_brick(p)
            if brick not in bricks_seen:
                bricks_seen.add(brick)
        # ensure all updates account for all bricks
        update_parameters = set()
        for b in bricks_seen:
            for var, update in b.updates.items():
                update_parameters.add(var)
            # accumulation must start from an empty (n == 0) state
            assert b.n.get_value() == 0
        if set(update_parameters) != set(self.parameters):
            raise ValueError("The updates and the parameters passed in do "
                             "not match. This could be due to no applications "
                             "or multiple applications found %d updates, and "
                             "%d parameters" % (len(update_parameters),
                                                len(self.parameters)))
        updates = dict_union(*[b.updates for b in bricks_seen])
        logger.info("Compiling BatchNorm accumulate")
        # Side-effect-only function: outputs nothing, applies the updates.
        self._func = theano.function(self.inputs, [], updates=updates,
                                     on_unused_input="warn")
        super(BatchNormAccumulate, self).initialize(**kwargs)

    def process_batch(self, batch):
        """Run the accumulation function on one named data batch."""
        if not set(self._input_names).issubset((batch.keys())):
            raise ValueError("Invalid batch. Got sources: (%s), expected "
                             "sources: (%s)" % (str(batch.keys()),
                                                str(self._input_names)))
        # Reorder the named sources to the compiled function's input order.
        ordered_batch = [batch[v.name] for v in self.inputs]
        self._func(*ordered_batch)
def get_batchnorm_parameters(cg):
    """ Get the parameters marked with BATCHNORM_POPULATION

    Parameters
    ----------
    cg: `blocks.graph.ComputationGraph`
        computation graph to look through

    Returns
    -------
    variables: list
        list of variables
    """
    return VariableFilter(roles=[BATCHNORM_POPULATION])(cg.auxiliary_variables)
def infer_population(data_stream, model, n_batches):
    """Accumulate population statistics over *n_batches* batches, then
    switch every batch-norm brick in *model* to population mode."""
    loop = MainLoop(
        algorithm=BatchNormAccumulate(model),
        data_stream=data_stream,
        model=model,
        extensions=[FinishAfter(after_n_batches=n_batches), ProgressBar()])
    loop.run()
    # Flip each owning brick to use the freshly accumulated statistics.
    owning_bricks = {get_brick(p) for p in get_batchnorm_parameters(model)}
    for brick in owning_bricks:
        brick.use_population = True
def get_batchnorm_parameter_dict(model):
    """Return an OrderedDict mapping canonical parameter names to the
    batch-norm population parameters of *model*."""
    return OrderedDict(
        (get_parameter_name(p), p) for p in get_batchnorm_parameters(model))
def get_batchnorm_parameter_values(model):
    """Return {name: current value} for every batch-norm population
    parameter of *model*."""
    values = {}
    for name, variable in get_batchnorm_parameter_dict(model).items():
        values[name] = variable.get_value()
    return values
def set_batchnorm_parameter_values(model, values_dict):
    """Set batch-norm population parameters of *model* from a
    {name: value} mapping.

    Unknown names in *values_dict* and parameters with no supplied value
    are reported via logger.error; parameters without a value are left
    untouched rather than raising.
    """
    bn_dict = get_batchnorm_parameter_dict(model)
    unknown = set(values_dict) - set(bn_dict)
    missing = set(bn_dict) - set(values_dict)
    if len(unknown):
        logger.error("unknown parameter names: {}\n".format(unknown))
    if len(missing):
        logger.error("missing values for parameters: {}\n".format(missing))
    for name, v in bn_dict.items():
        # BUG FIX: a missing name previously raised KeyError here right
        # after it had already been reported above; skip it instead.
        if name in values_dict:
            v.set_value(values_dict[name])
| |
# coding: utf-8
import itertools
from operator import itemgetter
import re
import sqlalchemy as sa
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Identity
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import join
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import Sequence
from sqlalchemy import SmallInteger
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects.postgresql import base as postgresql
from sqlalchemy.dialects.postgresql import ExcludeConstraint
from sqlalchemy.dialects.postgresql import INTEGER
from sqlalchemy.dialects.postgresql import INTERVAL
from sqlalchemy.dialects.postgresql import TSRANGE
from sqlalchemy.schema import CreateIndex
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertions import assert_raises
from sqlalchemy.testing.assertions import assert_warns
from sqlalchemy.testing.assertions import AssertsExecutionResults
from sqlalchemy.testing.assertions import eq_
from sqlalchemy.testing.assertions import is_
from sqlalchemy.testing.assertions import is_true
class ReflectionFixtures:
    """Mixin providing an ``inspect_fixture`` parameterized over the
    inspection target (engine vs. connection) and future/legacy engines."""

    @testing.fixture(
        params=[
            ("engine", True),
            ("connection", True),
            ("engine", False),
            ("connection", False),
        ]
    )
    def inspect_fixture(self, request, metadata, testing_engine):
        # Each param is (inspection target, use future-style engine).
        engine, future = request.param

        eng = testing_engine(future=future)
        conn = eng.connect()

        # NOTE(review): the branch yields inspect(eng) when the param says
        # "connection" and inspect(conn) otherwise - the mapping looks
        # inverted relative to the param names; confirm intended behavior.
        if engine == "connection":
            yield inspect(eng), conn
        else:
            yield inspect(conn), conn
        conn.close()
class ForeignTableReflectionTest(
    ReflectionFixtures, fixtures.TablesTest, AssertsExecutionResults
):
    """Test reflection on foreign tables"""

    __requires__ = ("postgresql_test_dblink",)
    __only_on__ = "postgresql >= 9.3"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        from sqlalchemy.testing import config

        # Host of the database reachable through postgres_fdw, taken from
        # the [sqla_testing] section of the test configuration file.
        dblink = config.file_config.get(
            "sqla_testing", "postgres_test_db_link"
        )

        Table(
            "testtable",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(30)),
        )

        # Build the FDW server, user mapping and foreign table after the
        # metadata tables are created...
        for ddl in [
            "CREATE SERVER test_server FOREIGN DATA WRAPPER postgres_fdw "
            "OPTIONS (dbname 'test', host '%s')" % dblink,
            "CREATE USER MAPPING FOR public \
            SERVER test_server options (user 'scott', password 'tiger')",
            "CREATE FOREIGN TABLE test_foreigntable ( "
            "   id          INT, "
            "   data        VARCHAR(30) "
            ") SERVER test_server OPTIONS (table_name 'testtable')",
        ]:
            sa.event.listen(metadata, "after_create", sa.DDL(ddl))

        # ...and tear them down (reverse order) before the tables drop.
        for ddl in [
            "DROP FOREIGN TABLE test_foreigntable",
            "DROP USER MAPPING FOR public SERVER test_server",
            "DROP SERVER test_server",
        ]:
            sa.event.listen(metadata, "before_drop", sa.DDL(ddl))

    def test_foreign_table_is_reflected(self, connection):
        # Autoload the foreign table and check its columns came through.
        metadata = MetaData()
        table = Table("test_foreigntable", metadata, autoload_with=connection)
        eq_(
            set(table.columns.keys()),
            set(["id", "data"]),
            "Columns of reflected foreign table didn't equal expected columns",
        )

    def test_get_foreign_table_names(self, inspect_fixture):
        # get_foreign_table_names() lists only foreign tables.
        inspector, conn = inspect_fixture

        ft_names = inspector.get_foreign_table_names()
        eq_(ft_names, ["test_foreigntable"])

    def test_get_table_names_no_foreign(self, connection):
        # get_table_names() must exclude foreign tables.
        inspector = inspect(connection)
        names = inspector.get_table_names()
        eq_(names, ["testtable"])
class PartitionedReflectionTest(fixtures.TablesTest, AssertsExecutionResults):
    """Reflection of declaratively-partitioned tables (issue #4237)."""

    # partitioned table reflection, issue #4237
    __only_on__ = "postgresql >= 10"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # the actual function isn't reflected yet
        dv = Table(
            "data_values",
            metadata,
            Column("modulus", Integer, nullable=False),
            Column("data", String(30)),
            Column("q", Integer),
            postgresql_partition_by="range(modulus)",
        )

        # looks like this is reflected prior to #4237
        sa.event.listen(
            dv,
            "after_create",
            sa.DDL(
                "CREATE TABLE data_values_4_10 PARTITION OF data_values "
                "FOR VALUES FROM (4) TO (10)"
            ),
        )

        if testing.against("postgresql >= 11"):
            # indexes on partitioned tables require PG 11+
            Index("my_index", dv.c.q)

    def test_get_tablenames(self, connection):
        # Both the parent and the partition should be listed.
        assert {"data_values", "data_values_4_10"}.issubset(
            inspect(connection).get_table_names()
        )

    def test_reflect_cols(self, connection):
        cols = inspect(connection).get_columns("data_values")
        eq_([c["name"] for c in cols], ["modulus", "data", "q"])

    def test_reflect_cols_from_partition(self, connection):
        # Partitions expose the same column set as the parent.
        cols = inspect(connection).get_columns("data_values_4_10")
        eq_([c["name"] for c in cols], ["modulus", "data", "q"])

    @testing.only_on("postgresql >= 11")
    def test_reflect_index(self, connection):
        idx = inspect(connection).get_indexes("data_values")
        eq_(
            idx,
            [
                {
                    "name": "my_index",
                    "unique": False,
                    "column_names": ["q"],
                    "include_columns": [],
                    "dialect_options": {"postgresql_include": []},
                }
            ],
        )

    @testing.only_on("postgresql >= 11")
    def test_reflect_index_from_partition(self, connection):
        idx = inspect(connection).get_indexes("data_values_4_10")
        # note the name appears to be generated by PG, currently
        # 'data_values_4_10_q_idx'
        eq_(
            idx,
            [
                {
                    "column_names": ["q"],
                    "include_columns": [],
                    "dialect_options": {"postgresql_include": []},
                    "name": mock.ANY,
                    "unique": False,
                }
            ],
        )
class MaterializedViewReflectionTest(
    ReflectionFixtures, fixtures.TablesTest, AssertsExecutionResults
):
    """Test reflection on materialized views"""

    __only_on__ = "postgresql >= 9.3"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        """Create a base table plus one plain and one materialized view."""
        testtable = Table(
            "testtable",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(30)),
        )

        # insert data before we create the view
        @sa.event.listens_for(testtable, "after_create")
        def insert_data(target, connection, **kw):
            connection.execute(target.insert(), {"id": 89, "data": "d1"})

        materialized_view = sa.DDL(
            "CREATE MATERIALIZED VIEW test_mview AS " "SELECT * FROM testtable"
        )

        plain_view = sa.DDL(
            "CREATE VIEW test_regview AS " "SELECT * FROM testtable"
        )

        # views are created after the row insert above, so the mview
        # captures the inserted data
        sa.event.listen(testtable, "after_create", plain_view)
        sa.event.listen(testtable, "after_create", materialized_view)
        sa.event.listen(
            testtable,
            "before_drop",
            sa.DDL("DROP MATERIALIZED VIEW test_mview"),
        )
        sa.event.listen(
            testtable, "before_drop", sa.DDL("DROP VIEW test_regview")
        )

    def test_mview_is_reflected(self, connection):
        """A materialized view autoloads with the base table's columns."""
        metadata = MetaData()
        table = Table("test_mview", metadata, autoload_with=connection)
        eq_(
            set(table.columns.keys()),
            set(["id", "data"]),
            "Columns of reflected mview didn't equal expected columns",
        )

    def test_mview_select(self, connection):
        """Selecting from the mview returns the pre-captured row."""
        metadata = MetaData()
        table = Table("test_mview", metadata, autoload_with=connection)
        eq_(connection.execute(table.select()).fetchall(), [(89, "d1")])

    def test_get_view_names(self, inspect_fixture):
        """By default both plain and materialized views are listed."""
        insp, conn = inspect_fixture
        eq_(set(insp.get_view_names()), set(["test_regview", "test_mview"]))

    def test_get_view_names_plain(self, connection):
        """include=('plain',) filters to plain views only."""
        insp = inspect(connection)
        eq_(
            set(insp.get_view_names(include=("plain",))), set(["test_regview"])
        )

    def test_get_view_names_plain_string(self, connection):
        """include may also be given as a bare string."""
        insp = inspect(connection)
        eq_(set(insp.get_view_names(include="plain")), set(["test_regview"]))

    def test_get_view_names_materialized(self, connection):
        """include=('materialized',) filters to mviews only."""
        insp = inspect(connection)
        eq_(
            set(insp.get_view_names(include=("materialized",))),
            set(["test_mview"]),
        )

    def test_get_view_names_reflection_cache_ok(self, connection):
        """Differently-filtered calls must not poison the inspection cache."""
        insp = inspect(connection)
        eq_(
            set(insp.get_view_names(include=("plain",))), set(["test_regview"])
        )
        eq_(
            set(insp.get_view_names(include=("materialized",))),
            set(["test_mview"]),
        )
        eq_(set(insp.get_view_names()), set(["test_regview", "test_mview"]))

    def test_get_view_names_empty(self, connection):
        """An empty include tuple is rejected."""
        insp = inspect(connection)
        assert_raises(ValueError, insp.get_view_names, include=())

    def test_get_view_definition(self, connection):
        """The mview definition round-trips (whitespace-normalized)."""
        insp = inspect(connection)
        eq_(
            re.sub(
                r"[\n\t ]+",
                " ",
                insp.get_view_definition("test_mview").strip(),
            ),
            "SELECT testtable.id, testtable.data FROM testtable;",
        )
class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults):
    """Test PostgreSQL domains"""

    __only_on__ = "postgresql > 8.3"
    __backend__ = True

    @classmethod
    def setup_test_class(cls):
        """Create the domains/types, then the tables that use them."""
        with testing.db.begin() as con:
            for ddl in [
                'CREATE SCHEMA "SomeSchema"',
                "CREATE DOMAIN testdomain INTEGER NOT NULL DEFAULT 42",
                "CREATE DOMAIN test_schema.testdomain INTEGER DEFAULT 0",
                "CREATE TYPE testtype AS ENUM ('test')",
                "CREATE DOMAIN enumdomain AS testtype",
                "CREATE DOMAIN arraydomain AS INTEGER[]",
                'CREATE DOMAIN "SomeSchema"."Quoted.Domain" INTEGER DEFAULT 0',
                "CREATE DOMAIN nullable_domain AS TEXT CHECK "
                "(VALUE IN('FOO', 'BAR'))",
                "CREATE DOMAIN not_nullable_domain AS TEXT NOT NULL",
            ]:
                try:
                    con.exec_driver_sql(ddl)
                except exc.DBAPIError as e:
                    # tolerate leftovers from a prior aborted run; anything
                    # else is a real failure
                    if "already exists" not in str(e):
                        # bare raise preserves the original traceback
                        # (was: "raise e")
                        raise
            con.exec_driver_sql(
                "CREATE TABLE testtable (question integer, answer "
                "testdomain)"
            )
            con.exec_driver_sql(
                "CREATE TABLE test_schema.testtable(question "
                "integer, answer test_schema.testdomain, anything "
                "integer)"
            )
            con.exec_driver_sql(
                "CREATE TABLE crosschema (question integer, answer "
                "test_schema.testdomain)"
            )
            con.exec_driver_sql(
                "CREATE TABLE enum_test (id integer, data enumdomain)"
            )
            con.exec_driver_sql(
                "CREATE TABLE array_test (id integer, data arraydomain)"
            )
            con.exec_driver_sql(
                "CREATE TABLE quote_test "
                '(id integer, data "SomeSchema"."Quoted.Domain")'
            )
            con.exec_driver_sql(
                "CREATE TABLE nullable_domain_test "
                "(not_nullable_domain_col nullable_domain not null,"
                "nullable_local not_nullable_domain)"
            )

    @classmethod
    def teardown_test_class(cls):
        """Drop tables first, then the domains/types they depend on."""
        with testing.db.begin() as con:
            con.exec_driver_sql("DROP TABLE testtable")
            con.exec_driver_sql("DROP TABLE test_schema.testtable")
            con.exec_driver_sql("DROP TABLE crosschema")
            con.exec_driver_sql("DROP TABLE quote_test")
            con.exec_driver_sql("DROP DOMAIN testdomain")
            con.exec_driver_sql("DROP DOMAIN test_schema.testdomain")
            con.exec_driver_sql("DROP TABLE enum_test")
            con.exec_driver_sql("DROP DOMAIN enumdomain")
            con.exec_driver_sql("DROP TYPE testtype")
            con.exec_driver_sql("DROP TABLE array_test")
            con.exec_driver_sql("DROP DOMAIN arraydomain")
            con.exec_driver_sql('DROP DOMAIN "SomeSchema"."Quoted.Domain"')
            con.exec_driver_sql('DROP SCHEMA "SomeSchema"')
            con.exec_driver_sql("DROP TABLE nullable_domain_test")
            con.exec_driver_sql("DROP DOMAIN nullable_domain")
            con.exec_driver_sql("DROP DOMAIN not_nullable_domain")

    def test_table_is_reflected(self, connection):
        """Domain columns reflect with the domain's base type."""
        metadata = MetaData()
        table = Table("testtable", metadata, autoload_with=connection)
        eq_(
            set(table.columns.keys()),
            set(["question", "answer"]),
            "Columns of reflected table didn't equal expected columns",
        )
        assert isinstance(table.c.answer.type, Integer)

    def test_nullable_from_domain(self, connection):
        """NOT NULL comes from either the column or the domain itself."""
        metadata = MetaData()
        table = Table(
            "nullable_domain_test", metadata, autoload_with=connection
        )
        is_(table.c.not_nullable_domain_col.nullable, False)
        is_(table.c.nullable_local.nullable, False)

    def test_domain_is_reflected(self, connection):
        """Domain default and NOT NULL propagate to the column."""
        metadata = MetaData()
        table = Table("testtable", metadata, autoload_with=connection)
        eq_(
            str(table.columns.answer.server_default.arg),
            "42",
            "Reflected default value didn't equal expected value",
        )
        assert (
            not table.columns.answer.nullable
        ), "Expected reflected column to not be nullable."

    def test_enum_domain_is_reflected(self, connection):
        """A domain over an enum reflects the enum's labels."""
        metadata = MetaData()
        table = Table("enum_test", metadata, autoload_with=connection)
        eq_(table.c.data.type.enums, ["test"])

    def test_array_domain_is_reflected(self, connection):
        """A domain over INTEGER[] reflects as ARRAY of INTEGER."""
        metadata = MetaData()
        table = Table("array_test", metadata, autoload_with=connection)
        eq_(table.c.data.type.__class__, ARRAY)
        eq_(table.c.data.type.item_type.__class__, INTEGER)

    def test_quoted_remote_schema_domain_is_reflected(self, connection):
        """A quoted, schema-qualified domain reflects to its base type."""
        metadata = MetaData()
        table = Table("quote_test", metadata, autoload_with=connection)
        eq_(table.c.data.type.__class__, INTEGER)

    def test_table_is_reflected_test_schema(self, connection):
        """Reflection works for a table in a non-default schema."""
        metadata = MetaData()
        table = Table(
            "testtable",
            metadata,
            autoload_with=connection,
            schema="test_schema",
        )
        eq_(
            set(table.columns.keys()),
            set(["question", "answer", "anything"]),
            "Columns of reflected table didn't equal expected columns",
        )
        assert isinstance(table.c.anything.type, Integer)

    def test_schema_domain_is_reflected(self, connection):
        """The schema-local domain's default (0) is used, not the
        same-named default-schema domain's (42)."""
        metadata = MetaData()
        table = Table(
            "testtable",
            metadata,
            autoload_with=connection,
            schema="test_schema",
        )
        eq_(
            str(table.columns.answer.server_default.arg),
            "0",
            "Reflected default value didn't equal expected value",
        )
        assert (
            table.columns.answer.nullable
        ), "Expected reflected column to be nullable."

    def test_crosschema_domain_is_reflected(self, connection):
        """A default-schema table using a test_schema domain reflects
        that domain's default and nullability."""
        metadata = MetaData()
        table = Table("crosschema", metadata, autoload_with=connection)
        eq_(
            str(table.columns.answer.server_default.arg),
            "0",
            "Reflected default value didn't equal expected value",
        )
        assert (
            table.columns.answer.nullable
        ), "Expected reflected column to be nullable."

    def test_unknown_types(self, connection):
        """With ischema_names emptied, unknown types warn and reflect as
        NullType."""
        from sqlalchemy.dialects.postgresql import base

        ischema_names = base.PGDialect.ischema_names
        base.PGDialect.ischema_names = {}
        try:
            m2 = MetaData()
            assert_warns(
                exc.SAWarning, Table, "testtable", m2, autoload_with=connection
            )

            @testing.emits_warning("Did not recognize type")
            def warns():
                m3 = MetaData()
                t3 = Table("testtable", m3, autoload_with=connection)
                assert t3.c.answer.type.__class__ == sa.types.NullType

            # bug fix: the decorated check above was previously defined
            # but never invoked, so its assertions never ran
            warns()
        finally:
            # always restore the dialect's type map for other tests
            base.PGDialect.ischema_names = ischema_names
class ReflectionTest(
ReflectionFixtures, AssertsCompiledSQL, fixtures.TestBase
):
__only_on__ = "postgresql"
__backend__ = True
def test_reflected_primary_key_order(self, metadata, connection):
meta1 = metadata
subject = Table(
"subject",
meta1,
Column("p1", Integer, primary_key=True),
Column("p2", Integer, primary_key=True),
PrimaryKeyConstraint("p2", "p1"),
)
meta1.create_all(connection)
meta2 = MetaData()
subject = Table("subject", meta2, autoload_with=connection)
eq_(subject.primary_key.columns.keys(), ["p2", "p1"])
def test_pg_weirdchar_reflection(self, metadata, connection):
meta1 = metadata
subject = Table(
"subject", meta1, Column("id$", Integer, primary_key=True)
)
referer = Table(
"referer",
meta1,
Column("id", Integer, primary_key=True),
Column("ref", Integer, ForeignKey("subject.id$")),
)
meta1.create_all(connection)
meta2 = MetaData()
subject = Table("subject", meta2, autoload_with=connection)
referer = Table("referer", meta2, autoload_with=connection)
self.assert_(
(subject.c["id$"] == referer.c.ref).compare(
subject.join(referer).onclause
)
)
def test_reflect_default_over_128_chars(self, metadata, connection):
Table(
"t",
metadata,
Column("x", String(200), server_default="abcd" * 40),
).create(connection)
m = MetaData()
t = Table("t", m, autoload_with=connection)
eq_(
t.c.x.server_default.arg.text,
"'%s'::character varying" % ("abcd" * 40),
)
    def test_renamed_sequence_reflection(self, metadata, connection):
        """After renaming a serial column's sequence, reflection picks up
        the new sequence name and inserts keep working."""
        Table("t", metadata, Column("id", Integer, primary_key=True))
        metadata.create_all(connection)
        m2 = MetaData()
        # implicit_returning=False forces the sequence-default path
        t2 = Table("t", m2, autoload_with=connection, implicit_returning=False)
        eq_(t2.c.id.server_default.arg.text, "nextval('t_id_seq'::regclass)")
        r = connection.execute(t2.insert())
        eq_(r.inserted_primary_key, (1,))
        connection.exec_driver_sql(
            "alter table t_id_seq rename to foobar_id_seq"
        )
        # re-reflect: the default should now name the renamed sequence
        m3 = MetaData()
        t3 = Table("t", m3, autoload_with=connection, implicit_returning=False)
        eq_(
            t3.c.id.server_default.arg.text,
            "nextval('foobar_id_seq'::regclass)",
        )
        r = connection.execute(t3.insert())
        eq_(r.inserted_primary_key, (2,))
def test_altered_type_autoincrement_pk_reflection(
self, metadata, connection
):
metadata = metadata
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
metadata.create_all(connection)
connection.exec_driver_sql(
"alter table t alter column id type varchar(50)"
)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(t2.c.id.autoincrement, False)
eq_(t2.c.x.autoincrement, False)
def test_renamed_pk_reflection(self, metadata, connection):
metadata = metadata
Table("t", metadata, Column("id", Integer, primary_key=True))
metadata.create_all(connection)
connection.exec_driver_sql("alter table t rename id to t_id")
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_([c.name for c in t2.primary_key], ["t_id"])
def test_has_temporary_table(self, metadata, connection):
assert not inspect(connection).has_table("some_temp_table")
user_tmp = Table(
"some_temp_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
prefixes=["TEMPORARY"],
)
user_tmp.create(connection)
assert inspect(connection).has_table("some_temp_table")
    def test_cross_schema_reflection_one(self, metadata, connection):
        """FKs between two tables in the same non-default schema reflect
        and produce the expected join condition."""
        meta1 = metadata
        users = Table(
            "users",
            meta1,
            Column("user_id", Integer, primary_key=True),
            Column("user_name", String(30), nullable=False),
            schema="test_schema",
        )
        addresses = Table(
            "email_addresses",
            meta1,
            Column("address_id", Integer, primary_key=True),
            Column("remote_user_id", Integer, ForeignKey(users.c.user_id)),
            Column("email_address", String(20)),
            schema="test_schema",
        )
        meta1.create_all(connection)
        meta2 = MetaData()
        addresses = Table(
            "email_addresses",
            meta2,
            autoload_with=connection,
            schema="test_schema",
        )
        # only declare "users" as a stub; reflecting addresses must create
        # the FK target in meta2 for the join to resolve
        users = Table("users", meta2, must_exist=True, schema="test_schema")
        j = join(users, addresses)
        self.assert_(
            (users.c.user_id == addresses.c.remote_user_id).compare(j.onclause)
        )
def test_cross_schema_reflection_two(self, metadata, connection):
meta1 = metadata
subject = Table(
"subject", meta1, Column("id", Integer, primary_key=True)
)
referer = Table(
"referer",
meta1,
Column("id", Integer, primary_key=True),
Column("ref", Integer, ForeignKey("subject.id")),
schema="test_schema",
)
meta1.create_all(connection)
meta2 = MetaData()
subject = Table("subject", meta2, autoload_with=connection)
referer = Table(
"referer", meta2, schema="test_schema", autoload_with=connection
)
self.assert_(
(subject.c.id == referer.c.ref).compare(
subject.join(referer).onclause
)
)
    def test_cross_schema_reflection_three(self, metadata, connection):
        """FKs between two different non-default schemas reflect and
        join correctly."""
        meta1 = metadata
        subject = Table(
            "subject",
            meta1,
            Column("id", Integer, primary_key=True),
            schema="test_schema_2",
        )
        referer = Table(
            "referer",
            meta1,
            Column("id", Integer, primary_key=True),
            Column("ref", Integer, ForeignKey("test_schema_2.subject.id")),
            schema="test_schema",
        )
        meta1.create_all(connection)
        meta2 = MetaData()
        subject = Table(
            "subject", meta2, autoload_with=connection, schema="test_schema_2"
        )
        referer = Table(
            "referer", meta2, autoload_with=connection, schema="test_schema"
        )
        self.assert_(
            (subject.c.id == referer.c.ref).compare(
                subject.join(referer).onclause
            )
        )
    def test_cross_schema_reflection_four(self, metadata, connection):
        """Cross-schema FKs keep explicit schemas when reflecting with
        postgresql_ignore_search_path, even with both schemas on the
        search_path."""
        meta1 = metadata
        subject = Table(
            "subject",
            meta1,
            Column("id", Integer, primary_key=True),
            schema="test_schema_2",
        )
        referer = Table(
            "referer",
            meta1,
            Column("id", Integer, primary_key=True),
            Column("ref", Integer, ForeignKey("test_schema_2.subject.id")),
            schema="test_schema",
        )
        meta1.create_all(connection)

        # detach so the altered search_path doesn't leak back into the pool
        connection.detach()
        connection.exec_driver_sql(
            "SET search_path TO test_schema, test_schema_2"
        )
        meta2 = MetaData()
        subject = Table(
            "subject",
            meta2,
            autoload_with=connection,
            schema="test_schema_2",
            postgresql_ignore_search_path=True,
        )
        referer = Table(
            "referer",
            meta2,
            autoload_with=connection,
            schema="test_schema",
            postgresql_ignore_search_path=True,
        )
        self.assert_(
            (subject.c.id == referer.c.ref).compare(
                subject.join(referer).onclause
            )
        )
    def test_cross_schema_reflection_five(self, metadata, connection):
        """Reflecting with the default schema named explicitly plus
        ignore_search_path keeps that schema on the Table."""
        meta1 = metadata

        # we assume 'public'
        default_schema = connection.dialect.default_schema_name
        subject = Table(
            "subject", meta1, Column("id", Integer, primary_key=True)
        )
        referer = Table(
            "referer",
            meta1,
            Column("id", Integer, primary_key=True),
            Column("ref", Integer, ForeignKey("subject.id")),
        )
        meta1.create_all(connection)

        meta2 = MetaData()
        subject = Table(
            "subject",
            meta2,
            autoload_with=connection,
            schema=default_schema,
            postgresql_ignore_search_path=True,
        )
        referer = Table(
            "referer",
            meta2,
            autoload_with=connection,
            schema=default_schema,
            postgresql_ignore_search_path=True,
        )
        assert subject.schema == default_schema
        self.assert_(
            (subject.c.id == referer.c.ref).compare(
                subject.join(referer).onclause
            )
        )
    def test_cross_schema_reflection_six(self, metadata, connection):
        """By default the search_path determines whether reflected FK
        targets get a schema; ignore_search_path preserves them."""
        # test that the search path *is* taken into account
        # by default
        meta1 = metadata
        Table(
            "some_table",
            meta1,
            Column("id", Integer, primary_key=True),
            schema="test_schema",
        )
        Table(
            "some_other_table",
            meta1,
            Column("id", Integer, primary_key=True),
            Column("sid", Integer, ForeignKey("test_schema.some_table.id")),
            schema="test_schema_2",
        )
        meta1.create_all(connection)

        # detach so the altered search_path doesn't leak back into the pool
        connection.detach()
        connection.exec_driver_sql(
            "set search_path to test_schema_2, test_schema, public"
        )

        m1 = MetaData()

        Table("some_table", m1, schema="test_schema", autoload_with=connection)
        t2_schema = Table(
            "some_other_table",
            m1,
            schema="test_schema_2",
            autoload_with=connection,
        )

        t2_no_schema = Table("some_other_table", m1, autoload_with=connection)

        t1_no_schema = Table("some_table", m1, autoload_with=connection)

        m2 = MetaData()
        t1_schema_isp = Table(
            "some_table",
            m2,
            schema="test_schema",
            autoload_with=connection,
            postgresql_ignore_search_path=True,
        )
        t2_schema_isp = Table(
            "some_other_table",
            m2,
            schema="test_schema_2",
            autoload_with=connection,
            postgresql_ignore_search_path=True,
        )

        # t2_schema refers to t1_schema, but since "test_schema"
        # is in the search path, we instead link to t2_no_schema
        assert t2_schema.c.sid.references(t1_no_schema.c.id)

        # the two no_schema tables refer to each other also.
        assert t2_no_schema.c.sid.references(t1_no_schema.c.id)

        # but if we're ignoring search path, then we maintain
        # those explicit schemas vs. what the "default" schema is
        assert t2_schema_isp.c.sid.references(t1_schema_isp.c.id)
    def test_cross_schema_reflection_seven(self, metadata, connection):
        """MetaData.reflect() keys FK-target tables by search_path
        visibility unless postgresql_ignore_search_path is set."""
        # test that the search path *is* taken into account
        # by default
        meta1 = metadata
        Table(
            "some_table",
            meta1,
            Column("id", Integer, primary_key=True),
            schema="test_schema",
        )
        Table(
            "some_other_table",
            meta1,
            Column("id", Integer, primary_key=True),
            Column("sid", Integer, ForeignKey("test_schema.some_table.id")),
            schema="test_schema_2",
        )
        meta1.create_all(connection)

        # detach so the altered search_path doesn't leak back into the pool
        connection.detach()
        connection.exec_driver_sql(
            "set search_path to test_schema_2, test_schema, public"
        )
        meta2 = MetaData()
        meta2.reflect(connection, schema="test_schema_2")

        # FK target is on the search_path, so it's keyed without a schema
        eq_(
            set(meta2.tables),
            set(["test_schema_2.some_other_table", "some_table"]),
        )

        meta3 = MetaData()
        meta3.reflect(
            connection,
            schema="test_schema_2",
            postgresql_ignore_search_path=True,
        )

        # ignoring search_path keeps the explicit schema on the FK target
        eq_(
            set(meta3.tables),
            set(
                [
                    "test_schema_2.some_other_table",
                    "test_schema.some_table",
                ]
            ),
        )
    def test_cross_schema_reflection_metadata_uses_schema(
        self, metadata, connection
    ):
        """MetaData(schema=...) reflects that schema, pulling in
        default-schema FK targets keyed without a schema."""
        # test [ticket:3716]
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("sid", Integer, ForeignKey("some_other_table.id")),
            schema="test_schema",
        )
        Table(
            "some_other_table",
            metadata,
            Column("id", Integer, primary_key=True),
            schema=None,
        )
        metadata.create_all(connection)
        meta2 = MetaData(schema="test_schema")
        meta2.reflect(connection)

        eq_(
            set(meta2.tables),
            set(["some_other_table", "test_schema.some_table"]),
        )
def test_uppercase_lowercase_table(self, metadata, connection):
a_table = Table("a", metadata, Column("x", Integer))
A_table = Table("A", metadata, Column("x", Integer))
a_table.create(connection)
assert inspect(connection).has_table("a")
assert not inspect(connection).has_table("A")
A_table.create(connection, checkfirst=True)
assert inspect(connection).has_table("A")
def test_uppercase_lowercase_sequence(self, connection):
a_seq = Sequence("a")
A_seq = Sequence("A")
a_seq.create(connection)
assert connection.dialect.has_sequence(connection, "a")
assert not connection.dialect.has_sequence(connection, "A")
A_seq.create(connection, checkfirst=True)
assert connection.dialect.has_sequence(connection, "A")
a_seq.drop(connection)
A_seq.drop(connection)
    def test_index_reflection(self, metadata, connection):
        """Reflecting expression-based indexes should warn"""
        Table(
            "party",
            metadata,
            Column("id", String(10), nullable=False),
            Column("name", String(20), index=True),
            Column("aname", String(20)),
        )
        metadata.create_all(connection)
        # idx1/idx3 are expression-based and should be skipped with a
        # warning; idx2 (partial unique) should reflect normally
        connection.exec_driver_sql("create index idx1 on party ((id || name))")
        connection.exec_driver_sql(
            "create unique index idx2 on party (id) where name = 'test'"
        )
        connection.exec_driver_sql(
            """
            create index idx3 on party using btree
                (lower(name::text), lower(aname::text))
            """
        )

        def go():
            m2 = MetaData()
            t2 = Table("party", m2, autoload_with=connection)
            assert len(t2.indexes) == 2

            # Make sure indexes are in the order we expect them in
            tmp = [(idx.name, idx) for idx in t2.indexes]
            tmp.sort()
            r1, r2 = [idx[1] for idx in tmp]

            assert r1.name == "idx2"
            assert r1.unique is True
            assert r2.unique is False
            assert [t2.c.id] == r1.columns
            assert [t2.c.name] == r2.columns

        # run the reflection and verify exactly the two skip warnings
        testing.assert_warnings(
            go,
            [
                "Skipped unsupported reflection of "
                "expression-based index idx1",
                "Skipped unsupported reflection of "
                "expression-based index idx3",
            ],
        )
def test_index_reflection_partial(self, metadata, connection):
"""Reflect the filter definition on partial indexes"""
metadata = metadata
t1 = Table(
"table1",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(20)),
Column("x", Integer),
)
Index("idx1", t1.c.id, postgresql_where=t1.c.name == "test")
Index("idx2", t1.c.id, postgresql_where=t1.c.x >= 5)
metadata.create_all(connection)
ind = connection.dialect.get_indexes(connection, t1, None)
partial_definitions = []
for ix in ind:
if "dialect_options" in ix:
partial_definitions.append(
ix["dialect_options"]["postgresql_where"]
)
eq_(
sorted(partial_definitions),
["((name)::text = 'test'::text)", "(x >= 5)"],
)
t2 = Table("table1", MetaData(), autoload_with=connection)
idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
self.assert_compile(
CreateIndex(idx),
"CREATE INDEX idx1 ON table1 (id) "
"WHERE ((name)::text = 'test'::text)",
)
    def test_index_reflection_with_sorting(self, metadata, connection):
        """reflect indexes with sorting options set"""
        t1 = Table(
            "party",
            metadata,
            Column("id", String(10), nullable=False),
            Column("name", String(20)),
            Column("aname", String(20)),
        )

        t1.create(connection)

        # check ASC, DESC options alone
        connection.exec_driver_sql(
            """
            create index idx1 on party
                (id, name ASC, aname DESC)
            """
        )

        # check DESC w/ NULLS options
        connection.exec_driver_sql(
            """
          create index idx2 on party
                (name DESC NULLS FIRST, aname DESC NULLS LAST)
            """
        )

        # check ASC w/ NULLS options
        connection.exec_driver_sql(
            """
          create index idx3 on party
                (name ASC NULLS FIRST, aname ASC NULLS LAST)
            """
        )

        # reflect data
        m2 = MetaData()
        t2 = Table("party", m2, autoload_with=connection)

        eq_(len(t2.indexes), 3)

        # Make sure indexes are in the order we expect them in
        r1, r2, r3 = sorted(t2.indexes, key=lambda idx: idx.name)

        eq_(r1.name, "idx1")
        eq_(r2.name, "idx2")
        eq_(r3.name, "idx3")

        # "ASC NULLS LAST" is implicit default for indexes,
        # and "NULLS FIRST" is implicit default for "DESC".
        # (https://www.postgresql.org/docs/11/indexes-ordering.html)

        def compile_exprs(exprs):
            # compare column expressions by their string form
            return list(map(str, exprs))

        eq_(
            compile_exprs([t2.c.id, t2.c.name, t2.c.aname.desc()]),
            compile_exprs(r1.expressions),
        )

        eq_(
            compile_exprs([t2.c.name.desc(), t2.c.aname.desc().nulls_last()]),
            compile_exprs(r2.expressions),
        )

        eq_(
            compile_exprs([t2.c.name.nulls_first(), t2.c.aname]),
            compile_exprs(r3.expressions),
        )
def test_index_reflection_modified(self, metadata, connection):
"""reflect indexes when a column name has changed - PG 9
does not update the name of the column in the index def.
[ticket:2141]
"""
metadata = metadata
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
metadata.create_all(connection)
connection.exec_driver_sql("CREATE INDEX idx1 ON t (x)")
connection.exec_driver_sql("ALTER TABLE t RENAME COLUMN x to y")
ind = connection.dialect.get_indexes(connection, "t", None)
expected = [{"name": "idx1", "unique": False, "column_names": ["y"]}]
if testing.requires.index_reflects_included_columns.enabled:
expected[0]["include_columns"] = []
expected[0]["dialect_options"] = {"postgresql_include": []}
eq_(ind, expected)
def test_index_reflection_with_storage_options(self, metadata, connection):
"""reflect indexes with storage options set"""
metadata = metadata
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
metadata.create_all(connection)
connection.exec_driver_sql(
"CREATE INDEX idx1 ON t (x) WITH (fillfactor = 50)"
)
ind = testing.db.dialect.get_indexes(connection, "t", None)
expected = [
{
"unique": False,
"column_names": ["x"],
"name": "idx1",
"dialect_options": {"postgresql_with": {"fillfactor": "50"}},
}
]
if testing.requires.index_reflects_included_columns.enabled:
expected[0]["include_columns"] = []
expected[0]["dialect_options"]["postgresql_include"] = []
eq_(ind, expected)
m = MetaData()
t1 = Table("t", m, autoload_with=connection)
eq_(
list(t1.indexes)[0].dialect_options["postgresql"]["with"],
{"fillfactor": "50"},
)
    def test_index_reflection_with_access_method(self, metadata, connection):
        """reflect indexes built with a non-default access method (gin)"""
        Table(
            "t",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", ARRAY(Integer)),
        )
        metadata.create_all(connection)
        connection.exec_driver_sql("CREATE INDEX idx1 ON t USING gin (x)")

        ind = testing.db.dialect.get_indexes(connection, "t", None)
        expected = [
            {
                "unique": False,
                "column_names": ["x"],
                "name": "idx1",
                "dialect_options": {"postgresql_using": "gin"},
            }
        ]
        if testing.requires.index_reflects_included_columns.enabled:
            expected[0]["include_columns"] = []
            expected[0]["dialect_options"]["postgresql_include"] = []
        eq_(ind, expected)
        m = MetaData()
        t1 = Table("t", m, autoload_with=connection)
        # the access method round-trips onto the reflected Index
        eq_(
            list(t1.indexes)[0].dialect_options["postgresql"]["using"],
            "gin",
        )
    @testing.skip_if("postgresql < 11.0", "indnkeyatts not supported")
    def test_index_reflection_with_include(self, metadata, connection):
        """reflect indexes with include set"""
        Table(
            "t",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", ARRAY(Integer)),
            Column("name", String(20)),
        )
        metadata.create_all(connection)
        connection.exec_driver_sql("CREATE INDEX idx1 ON t (x) INCLUDE (name)")

        # prior to #5205, this would return:
        # [{'column_names': ['x', 'name'],
        #  'name': 'idx1', 'unique': False}]

        ind = connection.dialect.get_indexes(connection, "t", None)
        # INCLUDE columns are reported separately from key columns
        eq_(
            ind,
            [
                {
                    "unique": False,
                    "column_names": ["x"],
                    "include_columns": ["name"],
                    "dialect_options": {"postgresql_include": ["name"]},
                    "name": "idx1",
                }
            ],
        )
    def test_foreign_key_option_inspection(self, metadata, connection):
        """FK options (match/onupdate/ondelete/deferrable/initially)
        reflect via get_foreign_keys(); PG-default options are omitted."""
        Table(
            "person",
            metadata,
            Column("id", String(length=32), nullable=False, primary_key=True),
            Column(
                "company_id",
                ForeignKey(
                    "company.id",
                    name="person_company_id_fkey",
                    match="FULL",
                    onupdate="RESTRICT",
                    ondelete="RESTRICT",
                    deferrable=True,
                    initially="DEFERRED",
                ),
            ),
        )
        Table(
            "company",
            metadata,
            Column("id", String(length=32), nullable=False, primary_key=True),
            Column("name", String(length=255)),
            Column(
                "industry_id",
                ForeignKey(
                    "industry.id",
                    name="company_industry_id_fkey",
                    onupdate="CASCADE",
                    ondelete="CASCADE",
                    deferrable=False,  # PG default
                    # PG default
                    initially="IMMEDIATE",
                ),
            ),
        )
        Table(
            "industry",
            metadata,
            Column("id", Integer(), nullable=False, primary_key=True),
            Column("name", String(length=255)),
        )
        # expected get_foreign_keys() records, keyed by constraint name;
        # note company's default-valued options don't appear in "options"
        fk_ref = {
            "person_company_id_fkey": {
                "name": "person_company_id_fkey",
                "constrained_columns": ["company_id"],
                "referred_columns": ["id"],
                "referred_table": "company",
                "referred_schema": None,
                "options": {
                    "onupdate": "RESTRICT",
                    "deferrable": True,
                    "ondelete": "RESTRICT",
                    "initially": "DEFERRED",
                    "match": "FULL",
                },
            },
            "company_industry_id_fkey": {
                "name": "company_industry_id_fkey",
                "constrained_columns": ["industry_id"],
                "referred_columns": ["id"],
                "referred_table": "industry",
                "referred_schema": None,
                "options": {"onupdate": "CASCADE", "ondelete": "CASCADE"},
            },
        }
        metadata.create_all(connection)
        inspector = inspect(connection)
        fks = inspector.get_foreign_keys(
            "person"
        ) + inspector.get_foreign_keys("company")
        for fk in fks:
            eq_(fk, fk_ref[fk["name"]])
def test_inspect_enums_schema(self, metadata, connection):
enum_type = postgresql.ENUM(
"sad",
"ok",
"happy",
name="mood",
schema="test_schema",
metadata=metadata,
)
enum_type.create(connection)
inspector = inspect(connection)
eq_(
inspector.get_enums("test_schema"),
[
{
"visible": False,
"name": "mood",
"schema": "test_schema",
"labels": ["sad", "ok", "happy"],
}
],
)
    def test_inspect_enums(self, metadata, inspect_fixture):
        """An enum created after the Inspector exists is still reported
        by get_enums()."""
        inspector, conn = inspect_fixture

        enum_type = postgresql.ENUM(
            "cat", "dog", "rat", name="pet", metadata=metadata
        )

        # run the CREATE TYPE in its own committed transaction before
        # asking the inspector about it
        with conn.begin():
            enum_type.create(conn)

        eq_(
            inspector.get_enums(),
            [
                {
                    "visible": True,
                    "labels": ["cat", "dog", "rat"],
                    "name": "pet",
                    "schema": "public",
                }
            ],
        )
def test_get_table_oid(self, metadata, inspect_fixture):
inspector, conn = inspect_fixture
with conn.begin():
Table("some_table", metadata, Column("q", Integer)).create(conn)
assert inspector.get_table_oid("some_table") is not None
    def test_inspect_enums_case_sensitive(self, metadata, connection):
        """get_enums() handles mixed-case and dotted type/schema names."""
        sa.event.listen(
            metadata,
            "before_create",
            sa.DDL('create schema "TestSchema"'),
        )
        sa.event.listen(
            metadata,
            "after_drop",
            sa.DDL('drop schema if exists "TestSchema" cascade'),
        )

        # the same three enum names in each of the three schemas
        for enum in "lower_case", "UpperCase", "Name.With.Dot":
            for schema in None, "test_schema", "TestSchema":

                postgresql.ENUM(
                    "CapsOne",
                    "CapsTwo",
                    name=enum,
                    schema=schema,
                    metadata=metadata,
                )

        metadata.create_all(connection)
        inspector = inspect(connection)
        for schema in None, "test_schema", "TestSchema":
            eq_(
                sorted(
                    inspector.get_enums(schema=schema), key=itemgetter("name")
                ),
                [
                    {
                        "visible": schema is None,
                        "labels": ["CapsOne", "CapsTwo"],
                        "name": "Name.With.Dot",
                        "schema": "public" if schema is None else schema,
                    },
                    {
                        "visible": schema is None,
                        "labels": ["CapsOne", "CapsTwo"],
                        "name": "UpperCase",
                        "schema": "public" if schema is None else schema,
                    },
                    {
                        "visible": schema is None,
                        "labels": ["CapsOne", "CapsTwo"],
                        "name": "lower_case",
                        "schema": "public" if schema is None else schema,
                    },
                ],
            )
    def test_inspect_enums_case_sensitive_from_table(
        self, metadata, connection
    ):
        """Columns using mixed-case / dotted enum types reflect the
        correct (schema, name, labels) via get_columns()."""
        sa.event.listen(
            metadata,
            "before_create",
            sa.DDL('create schema "TestSchema"'),
        )
        sa.event.listen(
            metadata,
            "after_drop",
            sa.DDL('drop schema if exists "TestSchema" cascade'),
        )

        counter = itertools.count()
        # one table per (enum name, schema) combination: t0, t1, ...
        for enum in "lower_case", "UpperCase", "Name.With.Dot":
            for schema in None, "test_schema", "TestSchema":

                enum_type = postgresql.ENUM(
                    "CapsOne",
                    "CapsTwo",
                    name=enum,
                    metadata=metadata,
                    schema=schema,
                )

                Table(
                    "t%d" % next(counter),
                    metadata,
                    Column("q", enum_type),
                )

        metadata.create_all(connection)

        inspector = inspect(connection)
        counter = itertools.count()
        for enum in "lower_case", "UpperCase", "Name.With.Dot":
            for schema in None, "test_schema", "TestSchema":
                cols = inspector.get_columns("t%d" % next(counter))
                # flatten the reflected type for comparison
                cols[0]["type"] = (
                    cols[0]["type"].schema,
                    cols[0]["type"].name,
                    cols[0]["type"].enums,
                )
                eq_(
                    cols,
                    [
                        {
                            "name": "q",
                            "type": (schema, enum, ["CapsOne", "CapsTwo"]),
                            "nullable": True,
                            "default": None,
                            "autoincrement": False,
                            "comment": None,
                        }
                    ],
                )
    def test_inspect_enums_star(self, metadata, connection):
        """get_enums() default vs. get_enums("*").

        With no schema argument only the enum visible on the default
        ("public") schema is returned; passing "*" also returns the enum
        created under test_schema.
        """
        enum_type = postgresql.ENUM(
            "cat", "dog", "rat", name="pet", metadata=metadata
        )
        schema_enum_type = postgresql.ENUM(
            "sad",
            "ok",
            "happy",
            name="mood",
            schema="test_schema",
            metadata=metadata,
        )
        enum_type.create(connection)
        schema_enum_type.create(connection)
        inspector = inspect(connection)
        # Default call: only the "public" enum is reported.
        eq_(
            inspector.get_enums(),
            [
                {
                    "visible": True,
                    "labels": ["cat", "dog", "rat"],
                    "name": "pet",
                    "schema": "public",
                }
            ],
        )
        # "*": enums from every schema; the test_schema one is not "visible".
        eq_(
            inspector.get_enums("*"),
            [
                {
                    "visible": True,
                    "labels": ["cat", "dog", "rat"],
                    "name": "pet",
                    "schema": "public",
                },
                {
                    "visible": False,
                    "name": "mood",
                    "schema": "test_schema",
                    "labels": ["sad", "ok", "happy"],
                },
            ],
        )
def test_inspect_enum_empty(self, metadata, connection):
enum_type = postgresql.ENUM(name="empty", metadata=metadata)
enum_type.create(connection)
inspector = inspect(connection)
eq_(
inspector.get_enums(),
[
{
"visible": True,
"labels": [],
"name": "empty",
"schema": "public",
}
],
)
def test_inspect_enum_empty_from_table(self, metadata, connection):
Table(
"t", metadata, Column("x", postgresql.ENUM(name="empty"))
).create(connection)
t = Table("t", MetaData(), autoload_with=connection)
eq_(t.c.x.type.enums, [])
    def test_reflection_with_unique_constraint(self, metadata, connection):
        """The implicit index behind a UNIQUE constraint is de-duplicated.

        Raw inspection reports both the index and the constraint under the
        same name; Table reflection keeps only the constraint.
        """
        insp = inspect(connection)
        meta = metadata
        uc_table = Table(
            "pgsql_uc",
            meta,
            Column("a", String(10)),
            UniqueConstraint("a", name="uc_a"),
        )
        uc_table.create(connection)
        # PostgreSQL will create an implicit index for a unique
        # constraint. Separately we get both
        indexes = set(i["name"] for i in insp.get_indexes("pgsql_uc"))
        constraints = set(
            i["name"] for i in insp.get_unique_constraints("pgsql_uc")
        )
        self.assert_("uc_a" in indexes)
        self.assert_("uc_a" in constraints)
        # reflection corrects for the dupe
        reflected = Table("pgsql_uc", MetaData(), autoload_with=connection)
        indexes = set(i.name for i in reflected.indexes)
        constraints = set(uc.name for uc in reflected.constraints)
        self.assert_("uc_a" not in indexes)
        self.assert_("uc_a" in constraints)
    @testing.requires.btree_gist
    def test_reflection_with_exclude_constraint(self, metadata, connection):
        """EXCLUDE constraints surface only as their implicit GiST index.

        Raw inspection reports the backing index (flagged via
        duplicates_constraint); Table reflection drops it entirely since
        the EXCLUDE construct itself is not reflected.
        """
        m = metadata
        Table(
            "t",
            m,
            Column("id", Integer, primary_key=True),
            Column("period", TSRANGE),
            ExcludeConstraint(("period", "&&"), name="quarters_period_excl"),
        )
        m.create_all(connection)
        insp = inspect(connection)
        # PostgreSQL will create an implicit index for an exclude constraint.
        # we don't reflect the EXCLUDE yet.
        expected = [
            {
                "unique": False,
                "name": "quarters_period_excl",
                "duplicates_constraint": "quarters_period_excl",
                "dialect_options": {"postgresql_using": "gist"},
                "column_names": ["period"],
            }
        ]
        # Backends that report INCLUDE columns add two extra keys.
        if testing.requires.index_reflects_included_columns.enabled:
            expected[0]["include_columns"] = []
            expected[0]["dialect_options"]["postgresql_include"] = []
        eq_(insp.get_indexes("t"), expected)
        # reflection corrects for the dupe
        reflected = Table("t", MetaData(), autoload_with=connection)
        eq_(set(reflected.indexes), set())
    def test_reflect_unique_index(self, metadata, connection):
        """A unique INDEX (not a constraint) stays an index on reflection."""
        insp = inspect(connection)
        meta = metadata
        # a unique index OTOH we are able to detect is an index
        # and not a unique constraint
        uc_table = Table(
            "pgsql_uc",
            meta,
            Column("a", String(10)),
            Index("ix_a", "a", unique=True),
        )
        uc_table.create(connection)
        indexes = dict((i["name"], i) for i in insp.get_indexes("pgsql_uc"))
        constraints = set(
            i["name"] for i in insp.get_unique_constraints("pgsql_uc")
        )
        self.assert_("ix_a" in indexes)
        assert indexes["ix_a"]["unique"]
        self.assert_("ix_a" not in constraints)
        # Table-level reflection agrees: index present, no constraint.
        reflected = Table("pgsql_uc", MetaData(), autoload_with=connection)
        indexes = dict((i.name, i) for i in reflected.indexes)
        constraints = set(uc.name for uc in reflected.constraints)
        self.assert_("ix_a" in indexes)
        assert indexes["ix_a"].unique
        self.assert_("ix_a" not in constraints)
    def test_reflect_check_constraint(self, metadata, connection):
        """CHECK constraints reflect with server-normalized expression text.

        Covers boolean combinations, a user-defined function call, and a
        literal containing embedded newlines; the expected values are the
        parenthesized/casted forms PostgreSQL stores.
        """
        meta = metadata
        # Helper function referenced by constraint cc3; created/dropped via
        # DDL hooks around the metadata lifecycle.
        udf_create = """\
            CREATE OR REPLACE FUNCTION is_positive(
                x integer DEFAULT '-1'::integer)
            RETURNS boolean
            LANGUAGE 'plpgsql'
            COST 100
            VOLATILE
            AS $BODY$BEGIN
            RETURN x > 0;
            END;$BODY$;
            """
        sa.event.listen(meta, "before_create", sa.DDL(udf_create))
        sa.event.listen(
            meta,
            "after_drop",
            sa.DDL("DROP FUNCTION IF EXISTS is_positive(integer)"),
        )
        Table(
            "pgsql_cc",
            meta,
            Column("a", Integer()),
            Column("b", String),
            CheckConstraint("a > 1 AND a < 5", name="cc1"),
            CheckConstraint("a = 1 OR (a > 2 AND a < 5)", name="cc2"),
            CheckConstraint("is_positive(a)", name="cc3"),
            CheckConstraint("b != 'hi\nim a name \nyup\n'", name="cc4"),
        )
        meta.create_all(connection)
        reflected = Table("pgsql_cc", MetaData(), autoload_with=connection)
        # Map constraint name -> reflected SQL text for comparison.
        check_constraints = dict(
            (uc.name, uc.sqltext.text)
            for uc in reflected.constraints
            if isinstance(uc, CheckConstraint)
        )
        eq_(
            check_constraints,
            {
                "cc1": "(a > 1) AND (a < 5)",
                "cc2": "(a = 1) OR ((a > 2) AND (a < 5))",
                "cc3": "is_positive(a)",
                "cc4": "(b)::text <> 'hi\nim a name \nyup\n'::text",
            },
        )
    def test_reflect_check_warning(self):
        """Unparseable CHECK definition text emits a warning.

        Uses a mocked connection returning a bogus definition so no real
        database round-trip is needed.
        """
        rows = [("some name", "NOTCHECK foobar")]
        conn = mock.Mock(
            execute=lambda *arg, **kw: mock.MagicMock(
                fetchall=lambda: rows, __iter__=lambda self: iter(rows)
            )
        )
        # get_table_oid is stubbed out so get_check_constraints proceeds
        # straight to parsing the mocked rows.
        with mock.patch.object(
            testing.db.dialect, "get_table_oid", lambda *arg, **kw: 1
        ):
            with testing.expect_warnings(
                "Could not parse CHECK constraint text: 'NOTCHECK foobar'"
            ):
                testing.db.dialect.get_check_constraints(conn, "foo")
    def test_reflect_extra_newlines(self):
        """CHECK parsing strips the outer CHECK(...) wrapper, keeping inner text.

        The wrapping parentheses (and their adjacent newlines) are removed;
        newlines and CRLF sequences inside the expression — including ones
        inside quoted literals — are preserved verbatim.
        """
        rows = [
            ("some name", "CHECK (\n(a \nIS\n NOT\n\n NULL\n)\n)"),
            ("some other name", "CHECK ((b\nIS\nNOT\nNULL))"),
            ("some CRLF name", "CHECK ((c\r\n\r\nIS\r\nNOT\r\nNULL))"),
            ("some name", "CHECK (c != 'hi\nim a name\n')"),
        ]
        conn = mock.Mock(
            execute=lambda *arg, **kw: mock.MagicMock(
                fetchall=lambda: rows, __iter__=lambda self: iter(rows)
            )
        )
        with mock.patch.object(
            testing.db.dialect, "get_table_oid", lambda *arg, **kw: 1
        ):
            check_constraints = testing.db.dialect.get_check_constraints(
                conn, "foo"
            )
            eq_(
                check_constraints,
                [
                    {
                        "name": "some name",
                        "sqltext": "a \nIS\n NOT\n\n NULL\n",
                    },
                    {"name": "some other name", "sqltext": "b\nIS\nNOT\nNULL"},
                    {
                        "name": "some CRLF name",
                        "sqltext": "c\r\n\r\nIS\r\nNOT\r\nNULL",
                    },
                    {"name": "some name", "sqltext": "c != 'hi\nim a name\n'"},
                ],
            )
    def test_reflect_with_not_valid_check_constraint(self):
        """A trailing NOT VALID becomes dialect_options={'not_valid': True}."""
        rows = [("some name", "CHECK ((a IS NOT NULL)) NOT VALID")]
        conn = mock.Mock(
            execute=lambda *arg, **kw: mock.MagicMock(
                fetchall=lambda: rows, __iter__=lambda self: iter(rows)
            )
        )
        with mock.patch.object(
            testing.db.dialect, "get_table_oid", lambda *arg, **kw: 1
        ):
            check_constraints = testing.db.dialect.get_check_constraints(
                conn, "foo"
            )
            eq_(
                check_constraints,
                [
                    {
                        "name": "some name",
                        "sqltext": "a IS NOT NULL",
                        "dialect_options": {"not_valid": True},
                    }
                ],
            )
class CustomTypeReflectionTest(fixtures.TestBase):
    """Column reflection resolves unknown type strings via ischema_names."""
    class CustomType:
        # Minimal stand-in type; records up to two parsed type arguments.
        def __init__(self, arg1=None, arg2=None):
            self.arg1 = arg1
            self.arg2 = arg2
    # Holds the original class-level mapping during a test run.
    ischema_names = None
    def setup_test(self):
        # Shadow PGDialect.ischema_names with a copy so per-test additions
        # don't leak into other tests.
        ischema_names = postgresql.PGDialect.ischema_names
        postgresql.PGDialect.ischema_names = ischema_names.copy()
        self.ischema_names = ischema_names
    def teardown_test(self):
        # Restore the untouched original mapping.
        postgresql.PGDialect.ischema_names = self.ischema_names
        self.ischema_names = None
    def _assert_reflected(self, dialect):
        """Assert _get_column_info parses 'my_custom_type(...)' argument forms."""
        for sch, args in [
            ("my_custom_type", (None, None)),
            ("my_custom_type()", (None, None)),
            ("my_custom_type(ARG1)", ("ARG1", None)),
            ("my_custom_type(ARG1, ARG2)", ("ARG1", "ARG2")),
        ]:
            column_info = dialect._get_column_info(
                "colname", sch, None, False, {}, {}, "public", None, "", None
            )
            assert isinstance(column_info["type"], self.CustomType)
            eq_(column_info["type"].arg1, args[0])
            eq_(column_info["type"].arg2, args[1])
    def test_clslevel(self):
        # Registration on the (copied) class-level mapping is honored.
        postgresql.PGDialect.ischema_names["my_custom_type"] = self.CustomType
        dialect = postgresql.PGDialect()
        self._assert_reflected(dialect)
    def test_instancelevel(self):
        # Registration on an instance-level copy is honored too.
        dialect = postgresql.PGDialect()
        dialect.ischema_names = dialect.ischema_names.copy()
        dialect.ischema_names["my_custom_type"] = self.CustomType
        self._assert_reflected(dialect)
class IntervalReflectionTest(fixtures.TestBase):
    """INTERVAL fields/precision survive a create/reflect round trip."""
    __only_on__ = "postgresql"
    __backend__ = True
    # All PostgreSQL-supported interval field qualifiers.
    @testing.combinations(
        ("YEAR",),
        ("MONTH",),
        ("DAY",),
        ("HOUR",),
        ("MINUTE",),
        ("SECOND",),
        ("YEAR TO MONTH",),
        ("DAY TO HOUR",),
        ("DAY TO MINUTE",),
        ("DAY TO SECOND",),
        ("HOUR TO MINUTE",),
        ("HOUR TO SECOND",),
        ("MINUTE TO SECOND",),
        argnames="sym",
    )
    def test_interval_types(self, sym, metadata, connection):
        """Reflected fields match (lower-cased); precision stays None."""
        t = Table(
            "i_test",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data1", INTERVAL(fields=sym)),
        )
        t.create(connection)
        columns = {
            rec["name"]: rec
            for rec in inspect(connection).get_columns("i_test")
        }
        assert isinstance(columns["data1"]["type"], INTERVAL)
        eq_(columns["data1"]["type"].fields, sym.lower())
        eq_(columns["data1"]["type"].precision, None)
    def test_interval_precision(self, metadata, connection):
        """Reflected precision matches; fields stays None."""
        t = Table(
            "i_test",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data1", INTERVAL(precision=6)),
        )
        t.create(connection)
        columns = {
            rec["name"]: rec
            for rec in inspect(connection).get_columns("i_test")
        }
        assert isinstance(columns["data1"]["type"], INTERVAL)
        eq_(columns["data1"]["type"].fields, None)
        eq_(columns["data1"]["type"].precision, 6)
class IdentityReflectionTest(fixtures.TablesTest):
    """IDENTITY column options reflect correctly, including defaults.

    Tables use lower-case, upper-case and mixed-case/punctuated names to
    exercise quoting during reflection.
    """
    __only_on__ = "postgresql"
    __backend__ = True
    __requires__ = ("identity_columns",)
    _names = ("t1", "T2", "MiXeDCaSe!")
    @classmethod
    def define_tables(cls, metadata):
        # id1 sets every identity option explicitly; id2-id4 use defaults
        # on Integer / BigInteger / SmallInteger to check width-dependent
        # maxvalue reflection.
        for name in cls._names:
            Table(
                name,
                metadata,
                Column(
                    "id1",
                    Integer,
                    Identity(
                        always=True,
                        start=2,
                        increment=3,
                        minvalue=-2,
                        maxvalue=42,
                        cycle=True,
                        cache=4,
                    ),
                ),
                Column("id2", Integer, Identity()),
                Column("id3", BigInteger, Identity()),
                Column("id4", SmallInteger, Identity()),
            )
    @testing.combinations(*_names, argnames="name")
    def test_reflect_identity(self, connection, name):
        insp = inspect(connection)
        # Expected reflection of an all-defaults Identity(), minus the
        # type-dependent maxvalue which each branch fills in below.
        default = dict(
            always=False,
            start=1,
            increment=1,
            minvalue=1,
            cycle=False,
            cache=1,
        )
        cols = insp.get_columns(name)
        for col in cols:
            if col["name"] == "id1":
                is_true("identity" in col)
                eq_(
                    col["identity"],
                    dict(
                        always=True,
                        start=2,
                        increment=3,
                        minvalue=-2,
                        maxvalue=42,
                        cycle=True,
                        cache=4,
                    ),
                )
            elif col["name"] == "id2":
                is_true("identity" in col)
                exp = default.copy()
                exp.update(maxvalue=2 ** 31 - 1)  # 32-bit Integer
                eq_(col["identity"], exp)
            elif col["name"] == "id3":
                is_true("identity" in col)
                exp = default.copy()
                exp.update(maxvalue=2 ** 63 - 1)  # 64-bit BigInteger
                eq_(col["identity"], exp)
            elif col["name"] == "id4":
                is_true("identity" in col)
                exp = default.copy()
                exp.update(maxvalue=2 ** 15 - 1)  # 16-bit SmallInteger
                eq_(col["identity"], exp)
| |
'''
Copyright (c) 2020 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
remotemanagement.py
---------------
Reads files saved by Apple Remote Management.
'''
import logging
import plugins.helpers.common
import struct
from datetime import timedelta
from plugins.helpers.common import CommonFunctions
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
from time import gmtime, strftime
# Plugin metadata read by the mac_apt framework loader.
__Plugin_Name = "ARD"
__Plugin_Friendly_Name = "APPLE REMOTE DESKTOP"
__Plugin_Version = "1.0"
__Plugin_Description = "Reads ARD (Apple Remote Desktop) cached databases about app usage"
__Plugin_Author = "Yogesh Khatri"
__Plugin_Author_Email = "yogesh@swiftforensics.com"
# MACOS = run against a mounted image; ARTIFACTONLY = run against exported files.
__Plugin_Modes = "MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = 'Provide the ...'
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
class UserAcct:
    '''One session entry parsed from UserAcct.tmp (see parse_user_acct_plist).'''
    def __init__(self, app, time_start, time_end, uid, user, source_path):
        self.app = app                  # session kind key from the plist (e.g. 'tty' or 'console')
        self.time_start = time_start    # converted 'inTime' (datetime or None)
        self.time_end = time_end        # converted 'outTime' (datetime or None)
        self.uid = uid                  # 'uid' value for the user entry
        self.user = user                # account name (top-level plist key)
        self.source_path = source_path  # path of the plist this came from
def PrintUserAccounts(user_accounts, output_params, input_path=''):
    '''Write the parsed UserAcct session entries out via the report writer.'''
    column_spec = [
        ('App', DataType.TEXT),
        ('Start Time', DataType.DATE),
        ('End Time', DataType.DATE),
        ('UID', DataType.TEXT),
        ('User', DataType.TEXT),
        ('Source', DataType.TEXT),
    ]
    log.info(f"{len(user_accounts)} user account entries found")
    rows = [
        [entry.app, entry.time_start, entry.time_end,
         entry.uid, entry.user, entry.source_path]
        for entry in user_accounts
    ]
    WriteList("ard user_accounts info", "ARD_UserAccounts", rows, column_spec, output_params, input_path)
def parse_user_acct_plist(plist, user_accounts, plist_path):
    '''Parse plist and add items to app list.

    Expected layout (from the code below): top-level keys are user names;
    each value is a dict with a 'uid' entry plus session-kind keys (e.g.
    'tty', 'console') whose values are lists of dicts carrying Mac absolute
    timestamps under 'inTime'/'outTime'.
    '''
    for user, items in plist.items():
        uid = items.get('uid', '')
        for k, v in items.items():
            if k == 'uid':
                continue  # uid handled above, not a session list
            elif isinstance (v, list): # tty or console
                session_name = k
                for session in v:
                    # Timestamps are Mac absolute time; converter returns
                    # None when the key is missing.
                    ua = UserAcct(session_name,
                                CommonFunctions.ReadMacAbsoluteTime(session.get('inTime', None)),
                                CommonFunctions.ReadMacAbsoluteTime(session.get('outTime', None)),
                                uid, user, plist_path)
                    user_accounts.append(ua)
class AppUsage:
    '''One application run entry parsed from AppUsage.plist/AppUsage.tmp.'''
    def __init__(self, app_name, app_path, was_quit, frontmost, time_start, run_length, user, source_path):
        self.app_name = app_name        # 'Name' value from the plist item
        self.app_path = app_path        # top-level plist key (application path)
        self.was_quit = was_quit        # 'wasQuit' from the run record (default 0)
        self.frontmost = frontmost      # 'Frontmost' from the run record (default 0)
        self.time_start = time_start    # converted 'Launched' timestamp
        self.run_length = run_length    # 'runLength' in seconds (see convert_to_dhms)
        self.user = user                # 'userName' from the run record
        self.source_path = source_path  # path of the plist this came from
def AppUsageInsertUnique(usage_list, app_usage):
    '''Append app_usage to usage_list unless an identical entry exists.

    Entries are considered duplicates when user, app_path, time_start and
    run_length all match (the two AppUsage source files overlap).
    '''
    for existing in usage_list:
        if (existing.user == app_usage.user
                and existing.app_path == app_usage.app_path
                and existing.time_start == app_usage.time_start
                and existing.run_length == app_usage.run_length):
            return  # duplicate found, do not add
    usage_list.append(app_usage)
def convert_to_dhms(seconds):
    '''Converts seconds to a string in "D days HH:MM:SS" format.'''
    clock_part = strftime('%H:%M:%S', gmtime(seconds))  # HH:MM:SS, hours wrap at 24
    day_count = timedelta(seconds=seconds).days         # whole days
    return f"{day_count} days {clock_part}"
def PrintAppUsage(app_usage_list, output_params, input_path=''):
    '''Write the de-duplicated app usage entries out via the report writer.'''
    column_spec = [
        ('App Name', DataType.TEXT),
        ('App Path', DataType.TEXT),
        ('Was Quit', DataType.INTEGER),
        ('Frontmost', DataType.REAL),
        ('Launched', DataType.DATE),
        ('Run Length', DataType.TEXT),
        ('User', DataType.TEXT),
        ('Source', DataType.TEXT),
    ]
    log.info(f"{len(app_usage_list)} app usage entries found")
    rows = [
        [entry.app_name, entry.app_path, entry.was_quit,
         entry.frontmost, entry.time_start,
         convert_to_dhms(entry.run_length),
         entry.user, entry.source_path]
        for entry in app_usage_list
    ]
    WriteList("ARD App Usage", "ARD_AppUsage", rows, column_spec, output_params, input_path)
def parse_app_usage_plist(plist, app_usage_list, plist_path):
    '''Parse plist and add items to app list.

    Expected layout (from the code below): top-level keys are application
    paths; each value carries 'Name' and a 'runData' list of run records
    with 'wasQuit', 'Frontmost', 'Launched', 'runLength', 'userName'.
    Entries are de-duplicated via AppUsageInsertUnique since both
    AppUsage.plist and AppUsage.tmp may contain the same runs.
    '''
    for app_path, items in plist.items():
        app_name = items.get('Name', '')
        run_data = items.get('runData', None)
        if run_data:
            for run in run_data:
                was_quit = run.get('wasQuit', 0)
                frontmost = run.get('Frontmost', 0)
                # NOTE(review): default here is '' while parse_user_acct_plist
                # uses None — presumably ReadMacAbsoluteTime tolerates both;
                # confirm and unify.
                start_time = CommonFunctions.ReadMacAbsoluteTime(run.get('Launched', ''))
                run_length = run.get('runLength', '')
                user = run.get('userName', '')
                au = AppUsage(app_name, app_path, was_quit, frontmost, start_time, run_length, user, plist_path)
                AppUsageInsertUnique(app_usage_list, au)
def read_plist_from_image(mac_info, plist_path):
    '''Read a plist from the mounted image; return its contents or None on error.'''
    success, plist, error = mac_info.ReadPlist(plist_path)
    if not success:
        log.error(error)
        return None
    return plist
def Plugin_Start(mac_info):
    '''Main Entry point function for plugin.

    Exports and parses the three RemoteManagement cache files (if present)
    and writes the results through the framework's output writer.
    '''
    user_acct_path = '/private/var/db/RemoteManagement/caches/UserAcct.tmp'
    app_usage_path_1 = '/private/var/db/RemoteManagement/caches/AppUsage.plist'
    app_usage_path_2 = '/private/var/db/RemoteManagement/caches/AppUsage.tmp'
    user_accounts = []
    if mac_info.IsValidFilePath(user_acct_path):
        # Export a copy of the artifact before parsing it.
        mac_info.ExportFile(user_acct_path, __Plugin_Name, "", False)
        plist = read_plist_from_image(mac_info, user_acct_path)
        if plist:
            parse_user_acct_plist(plist, user_accounts, user_acct_path)
    if len(user_accounts) > 0:
        PrintUserAccounts(user_accounts, mac_info.output_params, '')
    else:
        log.info('No ARD user accounts found in RemoteManagement cache')
    # Both app-usage files feed one list; AppUsageInsertUnique removes
    # entries that appear in both.
    app_usage_list = []
    if mac_info.IsValidFilePath(app_usage_path_1):
        mac_info.ExportFile(app_usage_path_1, __Plugin_Name, "", False)
        plist = read_plist_from_image(mac_info, app_usage_path_1)
        if plist:
            parse_app_usage_plist(plist, app_usage_list, app_usage_path_1)
    if mac_info.IsValidFilePath(app_usage_path_2):
        mac_info.ExportFile(app_usage_path_2, __Plugin_Name, "", False)
        plist = read_plist_from_image(mac_info, app_usage_path_2)
        if plist:
            parse_app_usage_plist(plist, app_usage_list, app_usage_path_2)
    if len(app_usage_list) > 0:
        PrintAppUsage(app_usage_list, mac_info.output_params, '')
    else:
        log.info('No ARD app usage info found in RemoteManagement cache')
def read_plist_file(input_file):
    '''Read a plist from a local file; return its contents or None on error.'''
    success, plist, error = CommonFunctions.ReadPlist(input_file)
    if not success:
        log.error(error)
        return None
    return plist
def Plugin_Start_Standalone(input_files_list, output_params):
    '''Entry point when run outside a full image (ARTIFACTONLY mode).

    Routes each input file by filename suffix to the UserAcct or AppUsage
    parser and writes results per file.
    '''
    log.info("Module Started as standalone")
    for input_path in input_files_list:
        log.debug("Input file passed was: " + input_path)
        #extension = os.path.splitext(input_path)[1].lower()
        if input_path.lower().endswith('useracct.tmp'):
            user_accounts = []
            plist = read_plist_file(input_path)
            if plist:
                parse_user_acct_plist(plist, user_accounts, input_path)
            if len(user_accounts) > 0:
                PrintUserAccounts(user_accounts, output_params, input_path)
            else:
                log.info('No ARD user accounts found in {}'.format(input_path))
        elif input_path.lower().endswith('appusage.tmp') or input_path.lower().endswith('appusage.plist'):
            app_usage_list = []
            plist = read_plist_file(input_path)
            if plist:
                parse_app_usage_plist(plist, app_usage_list, input_path)
            if len(app_usage_list) > 0:
                PrintAppUsage(app_usage_list, output_params, input_path)
            else:
                log.info('No ARD app usage info found in {}'.format(input_path))
if __name__ == '__main__':
    # mac_apt plugins are driven by the framework; direct execution only
    # prints this notice.
    print ("This plugin is a part of a framework and does not run independently on its own!")
| |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    '''Exception raised for errors in the openshiftcli wrapper classes.'''
    pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools.

    NOTE: this module is Python 2 code (print statements, dict.has_key).
    All oc invocations run via subprocess with KUBECONFIG pointed at
    self.kubeconfig.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content.

        Fetches the live object, applies each key/value from `content` via
        Yedit, and only calls `oc replace` when something actually changed.
        Returns the oc result dict, or {'returncode': 0, 'updated': False}
        when no change was needed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # yed.put returns a (changed, ...) tuple; replace only on a change.
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''run oc replace -f on the given file '''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''write content to a temp file and oc create it '''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''run oc create -f on the given file '''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname, selector=None):
        '''run oc delete on the named resource '''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template; optionally create the rendered objects.

        template_data, when given, is piped to `oc process -f -` on stdin.
        '''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        # Write the processed output to a temp file and create from it.
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])
        cmd.extend(['-o', 'json'])
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    #pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    #pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc.

        Runs `/usr/bin/oc` (or `oc adm` when oadm=True) with KUBECONFIG set,
        and returns a dict with returncode/results/cmd plus stdout/stderr
        (and 'err' on JSON decode problems).
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oc', 'adm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # Empty stdout is tolerated; 'err' is surfaced below.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            # Non-zero exit: report streams and empty results.
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules (Python 2 code) '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents.

        Serializes `data` as yaml/json (or writes it raw) and registers
        the file for cleanup at interpreter exit. Returns the path.
        '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit: remove each listed file or directory tree.'''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name (metadata.name match)'''
        rval = None
        for result in results:
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file, parsed as yaml or json '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Recursively compares lists and dicts, ignoring keys in `skip`
        (metadata/status plus any skip_keys). Returns True when every
        result key's value matches the user definition.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False
                # Pairwise comparison; dict elements recurse, others compare
                # the whole lists directly.
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False
            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False
        if debug:
            print 'returning true'
        return True
class OpenShiftCLIConfig(object):
    '''Generic holder for a named resource and its CLI option hash.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options
    @property
    def config_options(self):
        ''' expose the raw option hash '''
        return self._options
    def to_option_list(self):
        '''return all options as a string'''
        return self.stringify()
    def stringify(self):
        ''' render the option hash as --key=value cli params.

        An option is emitted when its 'include' flag is set and its value
        is truthy or an int (so 0 / False still render).
        '''
        params = []
        for opt_name, opt in self.config_options.items():
            emit = opt['include'] and (opt['value'] or isinstance(opt['value'], int))
            if emit:
                params.append('--{0}={1}'.format(opt_name.replace('_', '-'), opt['value']))
        return params
class YeditException(Exception):
    ''' Exception raised by Yedit for key-path parsing/update errors '''
    pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
    @property
    def separator(self):
        ''' getter method for the key-path separator '''
        return self._separator
@separator.setter
def separator(self):
''' getter method for yaml_dict '''
return self._separator
    @property
    def yaml_dict(self):
        ''' getter method for the parsed yaml document '''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for the parsed yaml document '''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key.

        An empty key clears the whole dict/list. Otherwise walks the parsed
        key path to the parent container and deletes the final list index
        or dict key. Returns True on removal, None when the path does not
        resolve.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk down to the parent of the final path element; each element
        # is an (array_index, dict_key) pair where only one side is set.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set `item` at the location addressed by dotted-key notation.

            d = {'a': {'b': 'c'}}
            key = a#b
            would update the value at 'c'.

        Intermediate dicts are created as needed; list indexes must already
        exist. Raises YeditException when the path traverses a non-dict or
        addresses a non-existent list slot.
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk/create down to the parent of the final path element.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
                    data = data[dict_key]
                    continue
                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))
                # Create the missing intermediate dict.
                data[dict_key] = {}
                data = data[dict_key]
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))
        if key == '':
            data = item
        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding data to object at path: {}".format(key))
        return data
@staticmethod
def get_entry(data, key, sep='.'):
    ''' Fetch the item at dotted path `key` from `data`.

    d = {'a': {'b': 'c'}}
    key = a.b
    return c
    '''
    if key == '':
        pass
    elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
        return None
    # Descend one component at a time; each component carries either a
    # dict key or a list index.
    for arr_ind, dict_key in Yedit.parse_key(key, sep):
        if dict_key and isinstance(data, dict):
            data = data.get(dict_key, None)
        elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
            data = data[int(arr_ind)]
        else:
            return None
    return data
def write(self):
    ''' Serialize yaml_dict to self.filename.

    Writes to a temp file then renames for an atomic-ish replace; makes a
    .orig backup first when self.backup is set.  Raises YeditException when
    no filename is set or the dump fails.
    '''
    if not self.filename:
        raise YeditException('Please specify a filename.')
    if self.backup and self.file_exists():
        shutil.copy(self.filename, self.filename + '.orig')
    tmp_filename = self.filename + '.yedit'
    try:
        with open(tmp_filename, 'w') as yfd:
            # pylint: disable=no-member,maybe-no-member
            if hasattr(self.yaml_dict, 'fa'):
                self.yaml_dict.fa.set_block_style()
            yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
    except Exception as err:
        # BaseException.message does not exist on Python 3; the original
        # `err.message` raised AttributeError and masked the real error.
        raise YeditException(str(err))
    os.rename(tmp_filename, self.filename)
    return (True, self.yaml_dict)
def read(self):
    ''' Return the file contents as a string, or None when no readable
    file is configured. '''
    # Identity comparison for None (`== None` is non-idiomatic and can be
    # fooled by objects overriding __eq__).
    if self.filename is None or not self.file_exists():
        return None
    with open(self.filename) as yfd:
        return yfd.read()
def file_exists(self):
    ''' return whether file exists '''
    # os.path.exists already yields the True/False the callers expect.
    return os.path.exists(self.filename)
def load(self, content_type='yaml'):
    ''' return yaml file '''
    contents = self.read()
    if not contents and not self.content:
        return None
    # Preloaded content takes precedence over the on-disk file.
    if self.content:
        if isinstance(self.content, dict):
            self.yaml_dict = self.content
            return self.yaml_dict
        elif isinstance(self.content, str):
            contents = self.content
    # check if it is yaml
    try:
        if content_type == 'yaml' and contents:
            self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
            # pylint: disable=no-member,maybe-no-member
            if hasattr(self.yaml_dict, 'fa'):
                self.yaml_dict.fa.set_block_style()
        elif content_type == 'json' and contents:
            # NOTE(review): json.loads raises ValueError, which is not a
            # yaml.YAMLError -- JSON parse failures propagate uncaught.
            self.yaml_dict = json.loads(contents)
    except yaml.YAMLError as err:
        # Error loading yaml or json
        raise YeditException('Problem with loading yaml file. %s' % err)
    return self.yaml_dict
def get(self, key):
    ''' Return the value stored at `key`, or None when absent. '''
    try:
        return Yedit.get_entry(self.yaml_dict, key, self.separator)
    except KeyError:
        return None
def pop(self, path, key_or_item):
    ''' Remove a key from a dict, or an item from a list, at `path`.

    Returns (changed, yaml_dict).
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None
    # Identity comparison for None instead of `== None`.
    if entry is None:
        return (False, self.yaml_dict)
    if isinstance(entry, dict):
        # `in` replaces dict.has_key(), which was removed in Python 3 and
        # crashed this method there.
        # pylint: disable=no-member,maybe-no-member
        if key_or_item in entry:
            entry.pop(key_or_item)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)
    elif isinstance(entry, list):
        # pylint: disable=no-member,maybe-no-member
        try:
            ind = entry.index(key_or_item)
        except ValueError:
            return (False, self.yaml_dict)
        entry.pop(ind)
        return (True, self.yaml_dict)
    return (False, self.yaml_dict)
def delete(self, path):
    ''' Remove `path` from the document; returns (changed, yaml_dict). '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None
    if entry is None:
        return (False, self.yaml_dict)
    removed = Yedit.remove_entry(self.yaml_dict, path, self.separator)
    return (bool(removed), self.yaml_dict)
def exists(self, path, value):
    ''' check if value exists at path'''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError as _:
        entry = None
    if isinstance(entry, list):
        # list target: simple membership test
        if value in entry:
            return True
        return False
    elif isinstance(entry, dict):
        if isinstance(value, dict):
            # dict vs dict: every key/value pair in `value` must match.
            # NOTE(review): entry[key] raises KeyError when `key` is absent
            # from entry -- presumably callers only pass known keys; verify.
            rval = False
            for key, val in value.items():
                if entry[key] != val:
                    rval = False
                    break
            else:
                rval = True
            return rval
        # scalar vs dict: tests membership among the dict KEYS, not values
        return value in entry
    # scalar entry: direct equality
    return entry == value
def append(self, path, value):
    '''append value to a list'''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError as _:
        entry = None
    if entry is None:
        # Path missing: create an empty list there, then re-fetch it.
        self.put(path, [])
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    if not isinstance(entry, list):
        # Target exists but is not a list -> cannot append.
        return (False, self.yaml_dict)
    # pylint: disable=no-member,maybe-no-member
    entry.append(value)
    return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
    ''' put path, value into a dict '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError as _:
        entry = None
    if isinstance(entry, dict):
        # dict target: merge `value` (which must itself be a dict) into it.
        # pylint: disable=no-member,maybe-no-member
        if not isinstance(value, dict):
            raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
                                 ' value=[%s] [%s]' % (value, type(value)))
        entry.update(value)
        return (True, self.yaml_dict)
    elif isinstance(entry, list):
        # list target: replace at an explicit index, at the index of
        # curr_value, or append when the value is not already present.
        # pylint: disable=no-member,maybe-no-member
        ind = None
        if curr_value:
            try:
                ind = entry.index(curr_value)
            except ValueError:
                return (False, self.yaml_dict)
        elif index != None:
            ind = index
        if ind != None and entry[ind] != value:
            entry[ind] = value
            return (True, self.yaml_dict)
        # see if it exists in the list
        try:
            ind = entry.index(value)
        except ValueError:
            # doesn't exist, append it
            entry.append(value)
            return (True, self.yaml_dict)
        #already exists, return
        if ind != None:
            return (False, self.yaml_dict)
    return (False, self.yaml_dict)
def put(self, path, value):
    ''' put path, value into a dict '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError as _:
        entry = None
    # No-op when the value is already in place.
    if entry == value:
        return (False, self.yaml_dict)
    # deepcopy didn't work
    # Round-trip through ruamel.yaml to clone while keeping comments/format.
    tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
    # pylint: disable=no-member
    if hasattr(self.yaml_dict, 'fa'):
        tmp_copy.fa.set_block_style()
    result = Yedit.add_entry(tmp_copy, path, value, self.separator)
    if not result:
        return (False, self.yaml_dict)
    # Only commit the working copy once the add succeeded.
    self.yaml_dict = tmp_copy
    return (True, self.yaml_dict)
def create(self, path, value):
    ''' create a yaml file '''
    # Only creates when the backing file does not already exist.
    if not self.file_exists():
        # deepcopy didn't work
        # Round-trip through ruamel.yaml to clone while keeping formatting.
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result:
            self.yaml_dict = tmp_copy
            return (True, self.yaml_dict)
    return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class GroupConfig(object):
    ''' Holds the options for an OpenShift Group resource and builds its
    API definition dict. '''
    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig):
        ''' constructor for handling group options '''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.namespace = namespace
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' populate self.data with the Group resource definition '''
        # Mutate (not rebind) self.data so external references stay valid.
        self.data.update({
            'apiVersion': 'v1',
            'kind': 'Group',
            'metadata': {'name': self.name},
            'users': None,
        })
# pylint: disable=too-many-instance-attributes
class Group(Yedit):
    ''' Thin Yedit wrapper representing one Group resource returned by oc. '''
    kind = 'group'

    def __init__(self, content):
        '''Group constructor; `content` is the group resource dict.'''
        super(Group, self).__init__(content=content)
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCGroup(OpenShiftCLI):
    ''' Wrap the oc command line tools for Group resources. '''
    kind = 'group'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCGroup.

        config  -- a GroupConfig carrying name/namespace/kubeconfig/data
        verbose -- accepted for API symmetry; NOTE(review): currently unused
        '''
        super(OCGroup, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.namespace = config.namespace
        self._group = None

    @property
    def group(self):
        ''' lazily fetch and cache the group from the cluster '''
        if not self._group:
            self.get()
        return self._group

    @group.setter
    def group(self, data):
        ''' setter for the cached group '''
        self._group = data

    def exists(self):
        ''' return whether the group exists on the cluster '''
        if self.group:
            return True
        return False

    def get(self):
        '''return group information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.group = Group(content=result['results'][0])
        elif 'groups \"%s\" not found' % self.config.name in result['stderr']:
            # A missing group is not an error for our purposes; report
            # success with an empty result instead.
            result['returncode'] = 0
            result['results'] = [{}]
        return result

    def delete(self):
        '''delete the object'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''create the object'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''update the object'''
        # need to update the tls information and the service name
        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        ''' verify an update is needed by diffing desired vs live state '''
        skip = []
        return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=skip, debug=True)
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
    '''
    ansible oc module for group
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            name=dict(default=None, type='str'),
            namespace=dict(default='default', type='str'),
            # addind users to a group is handled through the oc_users module
            #users=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
    )
    gconfig = GroupConfig(module.params['name'],
                          module.params['namespace'],
                          module.params['kubeconfig'],
                          )
    oc_group = OCGroup(gconfig,
                       verbose=module.params['debug'])
    state = module.params['state']
    api_rval = oc_group.get()
    #####
    # Get
    #####
    # exit_json raises SystemExit, so each branch below terminates the module.
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")
    ########
    # Delete
    ########
    if state == 'absent':
        if oc_group.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')
            api_rval = oc_group.delete()
            module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")
    if state == 'present':
        ########
        # Create
        ########
        if not oc_group.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')
            # Create it here
            api_rval = oc_group.create()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            # return the created object
            api_rval = oc_group.get()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            module.exit_json(changed=True, results=api_rval, state="present")
        ########
        # Update
        ########
        if oc_group.needs_update():
            api_rval = oc_group.update()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            # return the created object
            api_rval = oc_group.get()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            module.exit_json(changed=True, results=api_rval, state="present")
        module.exit_json(changed=False, results=api_rval, state="present")
    # Only reachable if the state validation above is bypassed.
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| |
#!/usr/bin/python
#
# pyxhook -- an extension to emulate some of the PyHook library on linux.
#
# Copyright (C) 2008 Tim Alexander <dragonfyre13@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Thanks to Alex Badea <vamposdecampos@gmail.com> for writing the Record
# demo for the xlib libraries. It helped me immensely working with these
# in this library.
#
# Thanks to the python-xlib team. This wouldn't have been possible without
# your code.
#
# This requires:
# at least python-xlib 1.4
# xwindows must have the "record" extension present, and active.
#
# This file has now been somewhat extensively modified by
# Daniel Folkinshteyn <nanotube@users.sf.net>
# So if there are any bugs, they are probably my fault. :)
import sys
import os
import re
import time
import threading
from Xlib import X, XK, display, error
from Xlib.ext import record
from Xlib.protocol import rq
from Xlib.protocol.event import KeyPress
from Xlib.protocol.event import KeyRelease
#######################################################################
########################START CLASS DEF################################
#######################################################################
class HookManager(threading.Thread):
    """This is the main class. Instantiate it, and you can hand it KeyDown and KeyUp (functions in your own code) which execute to parse the pyxhookkeyevent class that is returned.

    This simply takes these two values for now:
    KeyDown = The function to execute when a key is pressed, if it returns anything. It hands the function an argument that is the pyxhookkeyevent class.
    KeyUp = The function to execute when a key is released, if it returns anything. It hands the function an argument that is the pyxhookkeyevent class.
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self.finished = threading.Event()
        # Give these some initial values
        self.mouse_position_x = 0
        self.mouse_position_y = 0
        # NOTE(review): "shift" starts as a bool but is later incremented and
        # decremented as an int (bool/int arithmetic) -- it acts as a counter.
        self.ison = {"shift":False, "caps":False}
        # Compile our regex statements.
        self.isshift = re.compile('^Shift')
        self.iscaps = re.compile('^Caps_Lock')
        self.shiftablechar = re.compile('^[a-z0-9]$|^minus$|^equal$|^bracketleft$|^bracketright$|^semicolon$|^backslash$|^apostrophe$|^comma$|^period$|^slash$|^grave$')
        self.logrelease = re.compile('.*')
        self.isspace = re.compile('^space$')
        # Assign default function actions (do nothing).
        self.KeyDown = lambda x: True
        self.KeyUp = lambda x: True
        self.MouseAllButtonsDown = lambda x: True
        self.MouseAllButtonsUp = lambda x: True
        self.MouseMovement = lambda x: True
        self.contextEventMask = [X.KeyPress,X.MotionNotify]
        # Hook to our display.  Two connections: one for the record context,
        # one for regular queries/sends.
        self.local_dpy = display.Display()
        self.record_dpy = display.Display()

    def run(self):
        # Check if the extension is present
        if not self.record_dpy.has_extension("RECORD"):
            print("RECORD extension not found")
            sys.exit(1)
        r = self.record_dpy.record_get_version(0, 0)
        print("RECORD extension version %d.%d" % (r.major_version, r.minor_version))
        # Create a recording context; we only want key and mouse events
        self.ctx = self.record_dpy.record_create_context(
            0,
            [record.AllClients],
            [{
                'core_requests': (0, 0),
                'core_replies': (0, 0),
                'ext_requests': (0, 0, 0, 0),
                'ext_replies': (0, 0, 0, 0),
                'delivered_events': (0, 0),
                'device_events': tuple(self.contextEventMask), #(X.KeyPress, X.ButtonPress),
                'errors': (0, 0),
                'client_started': False,
                'client_died': False,
            }])
        # Enable the context; this only returns after a call to record_disable_context,
        # while calling the callback function in the meantime
        self.record_dpy.record_enable_context(self.ctx, self.processevents)
        # Finally free the context
        self.record_dpy.record_free_context(self.ctx)

    def cancel(self):
        # Stop the recording loop and flush the pending disable request.
        self.finished.set()
        self.local_dpy.record_disable_context(self.ctx)
        self.local_dpy.flush()

    def printevent(self, event):
        # Default debug handler: just dump the event.
        print(event)

    def HookKeyboard(self):
        pass
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.KeyPress
        #self.contextEventMask[0] = X.KeyPress

    def HookMouse(self):
        pass
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.MotionNotify
        # need mouse motion to track pointer position, since ButtonPress events
        # don't carry that info.
        #self.contextEventMask[1] = X.MotionNotify

    def processevents(self, reply):
        # Record-extension callback: parses the raw reply and dispatches
        # key events, optionally re-sending them to the focused window.
        # If grab is not hooked
        if not getattr(self, '_grab_is_hooked', False):
            # Set grab is hooked
            self._grab_is_hooked = True
            # Grab keys
            self._grab_keys()
        if reply.category != record.FromServer:
            return
        if reply.client_swapped:
            print("* received swapped protocol data, cowardly ignored")
            return
        if not len(reply.data) or ord(str(reply.data[0])) < 2:
            # not an event
            return
        data = reply.data
        while len(data):
            event, data = rq.EventField(None).parse_binary_value(data, self.record_dpy.display, None, None)
            # Whether propagate the event
            propagate = True
            # If is KeyPress event
            if event.type == X.KeyPress:
                # Get event object
                hookevent = self.keypressevent(event)
                # Call event handler
                propagate = self.KeyDown(hookevent)
            # If is KeyRelease event
            elif event.type == X.KeyRelease:
                # Get event object
                hookevent = self.keyreleaseevent(event)
                # Call event handler
                propagate = self.KeyUp(hookevent)
            # If is not KeyPress or KeyRelease event
            else:
                # Ignore
                return
            # If need propagate the event
            if propagate:
                # Get focus window
                window = self.local_dpy.get_input_focus()._data['focus']
                # If is KeyPress event
                if event.type == X.KeyPress:
                    # Get event class
                    event_class = KeyPress
                # If is KeyRelease event
                elif event.type == X.KeyRelease:
                    # Get event class
                    event_class = KeyRelease
                else:
                    return
                # Create event object
                new_event = event_class(
                    detail=event.detail,
                    time=event.time,
                    root=event.root,
                    window=window,
                    child=X.NONE,
                    root_x=event.root_x,
                    root_y=event.root_y,
                    event_x=event.event_x,
                    event_y=event.event_y,
                    state=event.state,
                    same_screen=event.same_screen,
                )
                # Send event
                self.local_dpy.send_event(window, new_event, propagate=True)
                # Flush
                self.local_dpy.flush()
            # Return
            return
            # NOTE(review): everything below is unreachable -- the
            # unconditional `return` above exits after the first event, so
            # this legacy dispatch chain (including all mouse handling) is
            # dead code.  Presumably left over from the unmodified pyxhook.
            if event.type == X.KeyPress:
                hookevent = self.keypressevent(event)
                self.KeyDown(hookevent)
            elif event.type == X.KeyRelease:
                hookevent = self.keyreleaseevent(event)
                self.KeyUp(hookevent)
            elif event.type == X.ButtonPress:
                hookevent = self.buttonpressevent(event)
                self.MouseAllButtonsDown(hookevent)
            elif event.type == X.ButtonRelease:
                hookevent = self.buttonreleaseevent(event)
                self.MouseAllButtonsUp(hookevent)
            elif event.type == X.MotionNotify:
                # use mouse moves to record mouse position, since press and release events
                # do not give mouse position info (event.root_x and event.root_y have
                # bogus info).
                hookevent = self.mousemoveevent(event)
                self.MouseMovement(hookevent)
        #print "processing events...", event.type

    def _grab_keys(self):
        # Grab every known keysym's keycode on the root window so key events
        # are delivered to us instead of the focused client.
        # Select event types
        # NOTE(review): xrandr_select_input with key masks looks suspect --
        # RandR selects screen-change notification, not key events; verify.
        self.local_dpy.screen().root.xrandr_select_input(
            X.KeyPressMask | X.KeyReleaseMask
        )
        # Ungrab all keys
        self.local_dpy.screen().root.ungrab_key(X.AnyKey, X.AnyModifier)
        # Grabbed key code set
        grabbed_keycode_set = set()
        # For each key definition in `XK` module
        for key_name, keysym in vars(XK).items():
            # If the key name starts with `XK_`
            if key_name.startswith('XK_'):
                # Convert keysym to key code
                keycode = self.local_dpy.keysym_to_keycode(keysym)
                # If the key code is not in the grabbed key code set
                if keycode not in grabbed_keycode_set:
                    # Add the key code to the grabbed key code set
                    grabbed_keycode_set.add(keycode)
                    # Ungrab the key
                    self.local_dpy.screen().root.ungrab_key(keycode, 0)
                    # Grab the key
                    self.local_dpy.screen().root.grab_key(
                        keycode,
                        0,
                        False,
                        X.GrabModeAsync,
                        X.GrabModeAsync
                    )

    def keypressevent(self, event):
        # Translate a raw KeyPress into a pyxhookkeyevent, tracking
        # shift/caps state so typable characters get the right case.
        matchto = self.lookup_keysym(self.local_dpy.keycode_to_keysym(event.detail, 0))
        if self.shiftablechar.match(self.lookup_keysym(self.local_dpy.keycode_to_keysym(event.detail, 0))): ## This is a character that can be typed.
            if self.ison["shift"] == False:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
                return self.makekeyhookevent(keysym, event)
            else:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
                return self.makekeyhookevent(keysym, event)
        else: ## Not a typable character.
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            if self.isshift.match(matchto):
                self.ison["shift"] = self.ison["shift"] + 1
            elif self.iscaps.match(matchto):
                if self.ison["caps"] == False:
                    self.ison["shift"] = self.ison["shift"] + 1
                    self.ison["caps"] = True
                # NOTE(review): this second `if` immediately undoes the first
                # one in the same call (caps was just set True above), so
                # caps-lock state can never stick -- likely meant to be
                # `elif`/`else`; confirm against upstream pyxhook.
                if self.ison["caps"] == True:
                    self.ison["shift"] = self.ison["shift"] - 1
                    self.ison["caps"] = False
            return self.makekeyhookevent(keysym, event)

    def keyreleaseevent(self, event):
        # Translate a raw KeyRelease into a pyxhookkeyevent, decrementing
        # the shift counter when a shift key is released.
        if self.shiftablechar.match(self.lookup_keysym(self.local_dpy.keycode_to_keysym(event.detail, 0))):
            if self.ison["shift"] == False:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            else:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
        else:
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
        matchto = self.lookup_keysym(keysym)
        if self.isshift.match(matchto):
            self.ison["shift"] = self.ison["shift"] - 1
        return self.makekeyhookevent(keysym, event)

    def buttonpressevent(self, event):
        #self.clickx = self.rootx
        #self.clicky = self.rooty
        return self.makemousehookevent(event)

    def buttonreleaseevent(self, event):
        #if (self.clickx == self.rootx) and (self.clicky == self.rooty):
            ##print "ButtonClick " + str(event.detail) + " x=" + str(self.rootx) + " y=" + str(self.rooty)
            #if (event.detail == 1) or (event.detail == 2) or (event.detail == 3):
                #self.captureclick()
        #else:
            #pass
        return self.makemousehookevent(event)
        # sys.stdout.write("ButtonDown " + str(event.detail) + " x=" + str(self.clickx) + " y=" + str(self.clicky) + "\n")
        # sys.stdout.write("ButtonUp " + str(event.detail) + " x=" + str(self.rootx) + " y=" + str(self.rooty) + "\n")
        #sys.stdout.flush()

    def mousemoveevent(self, event):
        # Track the pointer position; press/release events carry bogus coords.
        self.mouse_position_x = event.root_x
        self.mouse_position_y = event.root_y
        return self.makemousehookevent(event)

    # need the following because XK.keysym_to_string() only does printable chars
    # rather than being the correct inverse of XK.string_to_keysym()
    def lookup_keysym(self, keysym):
        for name in dir(XK):
            if name.startswith("XK_") and getattr(XK, name) == keysym:
                # NOTE(review): lstrip("XK_") strips the CHARACTER SET
                # {X, K, _}, not the prefix -- e.g. "XK_KP_0" becomes "P_0".
                # str.removeprefix / name[3:] would be the correct fix.
                return name.lstrip("XK_")
        return "[%d]" % keysym

    def asciivalue(self, keysym):
        # Return the ascii code for printable keysyms, 0 otherwise.
        asciinum = XK.string_to_keysym(self.lookup_keysym(keysym))
        if asciinum < 256:
            return asciinum
        else:
            return 0

    def makekeyhookevent(self, keysym, event):
        # Package a key event together with the focused-window info.
        storewm = self.xwindowinfo()
        if event.type == X.KeyPress:
            MessageName = "key down"
        elif event.type == X.KeyRelease:
            MessageName = "key up"
        # NOTE(review): MessageName is unbound for any other event type --
        # callers only pass key events, but this would raise if that changed.
        return pyxhookkeyevent(storewm["handle"], storewm["name"], storewm["class"], self.lookup_keysym(keysym), self.asciivalue(keysym), False, event.detail, MessageName)

    def makemousehookevent(self, event):
        # Package a mouse event together with the focused-window info.
        storewm = self.xwindowinfo()
        if event.detail == 1:
            MessageName = "mouse left "
        elif event.detail == 3:
            MessageName = "mouse right "
        elif event.detail == 2:
            MessageName = "mouse middle "
        elif event.detail == 5:
            MessageName = "mouse wheel down "
        elif event.detail == 4:
            MessageName = "mouse wheel up "
        else:
            MessageName = "mouse " + str(event.detail) + " "
        if event.type == X.ButtonPress:
            MessageName = MessageName + "down"
        elif event.type == X.ButtonRelease:
            MessageName = MessageName + "up"
        else:
            MessageName = "mouse moved"
        return pyxhookmouseevent(storewm["handle"], storewm["name"], storewm["class"], (self.mouse_position_x, self.mouse_position_y), MessageName)

    def xwindowinfo(self):
        # Best-effort lookup of the focused window's name/class/handle;
        # falls back to the parent window, then to None values.
        try:
            windowvar = self.local_dpy.get_input_focus().focus
            wmname = windowvar.get_wm_name()
            wmclass = windowvar.get_wm_class()
            wmhandle = str(windowvar)[20:30]
        except:
            ## This is to keep things running smoothly. It almost never happens, but still...
            return {"name":None, "class":None, "handle":None}
        if (wmname == None) and (wmclass == None):
            try:
                windowvar = windowvar.query_tree().parent
                wmname = windowvar.get_wm_name()
                wmclass = windowvar.get_wm_class()
                wmhandle = str(windowvar)[20:30]
            except:
                ## This is to keep things running smoothly. It almost never happens, but still...
                return {"name":None, "class":None, "handle":None}
        if wmclass == None:
            return {"name":wmname, "class":wmclass, "handle":wmhandle}
        else:
            return {"name":wmname, "class":wmclass[0], "handle":wmhandle}
class pyxhookkeyevent:
    """Container for a single keyboard event.

    Window         -- handle of the focused window
    WindowName     -- title of the focused window
    WindowProcName -- backend process name for the window
    Key            -- the key pressed, shifted to the correct caps value
    Ascii          -- ascii value, or 0 when outside the printable range
    KeyID          -- always False on X11 (Windows-only virtual key code)
    ScanCode       -- keyboard-dependent scan code; avoid relying on it
    MessageName    -- "key down" or "key up"
    """

    def __init__(self, Window, WindowName, WindowProcName, Key, Ascii, KeyID, ScanCode, MessageName):
        self.Window = Window
        self.WindowName = WindowName
        self.WindowProcName = WindowProcName
        self.Key = Key
        self.Ascii = Ascii
        self.KeyID = KeyID
        self.ScanCode = ScanCode
        self.MessageName = MessageName

    def __str__(self):
        # One "Label: value\n" line per field, in the documented order.
        fields = (
            ("Window Handle", self.Window),
            ("Window Name", self.WindowName),
            ("Window's Process Name", self.WindowProcName),
            ("Key Pressed", self.Key),
            ("Ascii Value", self.Ascii),
            ("KeyID", self.KeyID),
            ("ScanCode", self.ScanCode),
            ("MessageName", self.MessageName),
        )
        return "".join("%s: %s\n" % (label, value) for label, value in fields)
class pyxhookmouseevent:
    """Container for a single mouse event.

    Window         -- handle of the focused window
    WindowName     -- title of the focused window
    WindowProcName -- backend process name for the window
    Position       -- (x, y) pointer coordinates of the event
    MessageName    -- e.g. "mouse left down", "mouse right up", "mouse moved"
    """

    def __init__(self, Window, WindowName, WindowProcName, Position, MessageName):
        self.Window = Window
        self.WindowName = WindowName
        self.WindowProcName = WindowProcName
        self.Position = Position
        self.MessageName = MessageName

    def __str__(self):
        # One "Label: value\n" line per field, in the documented order.
        fields = (
            ("Window Handle", self.Window),
            ("Window Name", self.WindowName),
            ("Window's Process Name", self.WindowProcName),
            ("Position", self.Position),
            ("MessageName", self.MessageName),
        )
        return "".join("%s: %s\n" % (label, value) for label, value in fields)
#######################################################################
#########################END CLASS DEF#################################
#######################################################################
if __name__ == '__main__':
    # Demo: print every keyboard/mouse event for ten seconds, then stop.
    hm = HookManager()
    hm.HookKeyboard()
    hm.HookMouse()
    hm.KeyDown = hm.printevent
    hm.KeyUp = hm.printevent
    hm.MouseAllButtonsDown = hm.printevent
    hm.MouseAllButtonsUp = hm.printevent
    hm.MouseMovement = hm.printevent
    hm.start()
    time.sleep(10)
    hm.cancel()
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
from pymatgen.core.operations import MagSymmOp, SymmOp
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
class SymmOpTestCase(PymatgenTest):
def setUp(self):
self.op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, [0, 0, 1])
def test_properties(self):
rot = self.op.rotation_matrix
vec = self.op.translation_vector
self.assertArrayAlmostEqual(rot, [[0.8660254, -0.5, 0.0], [0.5, 0.8660254, 0.0], [0.0, 0.0, 1.0]], 2)
self.assertArrayAlmostEqual(vec, [0, 0, 1], 2)
def test_operate(self):
point = np.array([1, 2, 3])
newcoord = self.op.operate(point)
self.assertArrayAlmostEqual(newcoord, [-0.1339746, 2.23205081, 4.0], 2)
def test_operate_multi(self):
point = np.array([1, 2, 3])
newcoords = self.op.operate_multi([point, point])
self.assertArrayAlmostEqual(newcoords, [[-0.1339746, 2.23205081, 4.0]] * 2, 2)
newcoords = self.op.operate_multi([[point, point]] * 2)
self.assertArrayAlmostEqual(newcoords, [[[-0.1339746, 2.23205081, 4.0]] * 2] * 2, 2)
def test_inverse(self):
point = np.random.rand(3)
newcoord = self.op.operate(point)
self.assertArrayAlmostEqual(self.op.inverse.operate(newcoord), point, 2)
def test_reflection(self):
normal = np.random.rand(3)
origin = np.random.rand(3)
refl = SymmOp.reflection(normal, origin)
point = np.random.rand(3)
newcoord = refl.operate(point)
# Distance to the plane should be negatives of each other.
self.assertAlmostEqual(np.dot(newcoord - origin, normal), -np.dot(point - origin, normal))
def test_apply_rotation_only(self):
point = np.random.rand(3)
newcoord = self.op.operate(point)
rotate_only = self.op.apply_rotation_only(point)
self.assertArrayAlmostEqual(rotate_only + self.op.translation_vector, newcoord, 2)
def test_transform_tensor(self):
# Rank 2
tensor = np.arange(0, 9).reshape(3, 3)
new_tensor = self.op.transform_tensor(tensor)
self.assertArrayAlmostEqual(
new_tensor,
[
[-0.73205, -1.73205, -0.76794],
[0.26795, 4.73205, 5.33013],
[1.69615, 9.06218, 8.0],
],
5,
)
# Rank 3
tensor = np.arange(0, 27).reshape(3, 3, 3)
new_tensor = self.op.transform_tensor(tensor)
self.assertArrayAlmostEqual(
new_tensor,
[
[
[-0.871, -2.884, -1.928],
[-2.152, -6.665, -4.196],
[-1.026, -2.830, -1.572],
],
[
[0.044, 1.531, 1.804],
[4.263, 21.008, 17.928],
[5.170, 23.026, 18.722],
],
[
[1.679, 7.268, 5.821],
[9.268, 38.321, 29.919],
[8.285, 33.651, 26.000],
],
],
3,
)
# Rank 4
tensor = np.arange(0, 81).reshape(3, 3, 3, 3)
new_tensor = self.op.transform_tensor(tensor)
self.assertArrayAlmostEqual(
new_tensor,
[
[
[
[-0.981, -3.526, -2.514],
[-3.258, -11.660, -8.286],
[-2.184, -7.786, -5.517],
],
[
[-2.454, -8.660, -6.090],
[-7.660, -26.722, -18.629],
[-4.858, -16.763, -11.588],
],
[
[-1.194, -4.090, -2.811],
[-3.358, -11.165, -7.490],
[-1.909, -6.124, -3.983],
],
],
[
[
[-0.043, 0.340, 0.499],
[1.340, 6.866, 5.959],
[1.731, 7.825, 6.412],
],
[
[4.340, 18.062, 14.155],
[21.794, 88.301, 68.123],
[18.754, 75.087, 57.517],
],
[
[5.427, 21.620, 16.510],
[24.352, 95.979, 72.811],
[19.876, 77.909, 58.899],
],
],
[
[
[1.777, 6.999, 5.306],
[7.731, 30.218, 22.804],
[6.208, 24.170, 18.194],
],
[
[9.927, 38.414, 28.804],
[41.146, 158.656, 118.694],
[32.170, 123.792, 92.488],
],
[
[8.914, 34.268, 25.586],
[36.268, 139.086, 103.684],
[28.050, 107.416, 80.000],
],
],
],
3,
)
def test_are_symmetrically_related(self):
point = np.random.rand(3)
newcoord = self.op.operate(point)
self.assertTrue(self.op.are_symmetrically_related(point, newcoord))
self.assertTrue(self.op.are_symmetrically_related(newcoord, point))
def test_to_from_dict(self):
d = self.op.as_dict()
op = SymmOp.from_dict(d)
point = np.random.rand(3)
newcoord = self.op.operate(point)
self.assertTrue(op.are_symmetrically_related(point, newcoord))
def test_inversion(self):
    """Inversion through a random origin maps p so that the origin is the
    midpoint of p and its image."""
    center = np.random.rand(3)
    inversion = SymmOp.inversion(center)
    p = np.random.rand(3)
    p_inv = inversion.operate(p)
    # p - center and center - p_inv are the same displacement vector.
    self.assertArrayAlmostEqual(p - center, center - p_inv)
def test_xyz(self):
    """Round-trip SymmOp <-> xyz string forms; the parser must ignore
    whitespace, case and term order, and tiny numerical noise must not
    leak into the serialized string."""
    op = SymmOp([[1, -1, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
    s = op.as_xyz_string()
    self.assertEqual(s, "x-y, -y, -z")
    self.assertEqual(op, SymmOp.from_xyz_string(s))
    # A 1e-7 perturbation in the translation must round away cleanly.
    op2 = SymmOp([[0, -1, 0, 0.5], [1, 0, 0, 0.5], [0, 0, 1, 0.5 + 1e-7], [0, 0, 0, 1]])
    s2 = op2.as_xyz_string()
    self.assertEqual(s2, "-y+1/2, x+1/2, z+1/2")
    self.assertEqual(op2, SymmOp.from_xyz_string(s2))
    op2 = SymmOp(
        [
            [3, -2, -1, 0.5],
            [-1, 0, 0, 12.0 / 13],
            [0, 0, 1, 0.5 + 1e-7],
            [0, 0, 0, 1],
        ]
    )
    s2 = op2.as_xyz_string()
    self.assertEqual(s2, "3x-2y-z+1/2, -x+12/13, z+1/2")
    self.assertEqual(op2, SymmOp.from_xyz_string(s2))
    # Arbitrary interior whitespace must be tolerated by the parser.
    op3 = SymmOp.from_xyz_string("3x - 2y - z+1 /2 , -x+12/ 13, z+1/2")
    self.assertEqual(op2, op3)
    # Ensure strings can be read in any order
    op4 = SymmOp.from_xyz_string("1 /2 + 3X - 2y - z , 12/ 13-x, z+1/2")
    op5 = SymmOp.from_xyz_string("+1 /2 + 3x - 2y - z , 12/ 13-x, +1/2+z")
    self.assertEqual(op4, op3)
    self.assertEqual(op4, op5)
    self.assertEqual(op3, op5)
    # TODO: assertWarns not in Python 2.x unittest
    # update PymatgenTest for unittest2?
    # self.assertWarns(UserWarning, self.op.as_xyz_string)
    # Decimal translations are accepted on either side of the variable.
    o = SymmOp.from_xyz_string("0.5+x, 0.25+y, 0.75+z")
    self.assertArrayAlmostEqual(o.translation_vector, [0.5, 0.25, 0.75])
    o = SymmOp.from_xyz_string("x + 0.5, y + 0.25, z + 0.75")
    self.assertArrayAlmostEqual(o.translation_vector, [0.5, 0.25, 0.75])
class MagSymmOpTestCase(PymatgenTest):
    """Tests for magnetic symmetry operations: a spatial SymmOp paired
    with a time-reversal sign (+1/-1)."""

    def test_xyzt_string(self):
        """Parsing an xyzt string and re-serializing must round-trip."""
        xyzt_strings = ["x, y, z, +1", "x, y, z, -1", "-y+1/2, x+1/2, x+1/2, +1"]
        for xyzt_string in xyzt_strings:
            op = MagSymmOp.from_xyzt_string(xyzt_string)
            xyzt_string_out = op.as_xyzt_string()
            self.assertEqual(xyzt_string, xyzt_string_out)
        # Building from a plain SymmOp plus a time-reversal sign must
        # serialize with the sign appended as the fourth field.
        op = SymmOp(
            [
                [3, -2, -1, 0.5],
                [-1, 0, 0, 12.0 / 13],
                [0, 0, 1, 0.5 + 1e-7],
                [0, 0, 0, 1],
            ]
        )
        magop = MagSymmOp.from_symmop(op, -1)
        magop_str = magop.as_xyzt_string()
        self.assertEqual(magop.time_reversal, -1)
        self.assertEqual(magop_str, "3x-2y-z+1/2, -x+12/13, z+1/2, -1")

    def test_to_from_dict(self):
        """Dict round-trip preserves both the spatial part and the
        time-reversal sign."""
        op = SymmOp(
            [
                [3, -2, -1, 0.5],
                [-1, 0, 0, 12.0 / 13],
                [0, 0, 1, 0.5 + 1e-7],
                [0, 0, 0, 1],
            ]
        )
        magop = MagSymmOp.from_symmop(op, -1)
        magop2 = MagSymmOp.from_dict(magop.as_dict())
        self.assertEqual(magop2.time_reversal, -1)
        self.assertEqual(magop2.as_xyzt_string(), "3x-2y-z+1/2, -x+12/13, z+1/2, -1")

    def test_operate_magmom(self):
        """Each xyzt operation maps every representation of the same
        magnetic moment to the same expected global moment."""
        # all test magmoms are the same
        magmoms = [
            Magmom([1, 2, 3]),  # as Magmom
            [1, 2, 3],  # as list
            Magmom([-3, 2, 1], saxis=[1, 0, 0]),
        ]  # as Magmom with non-default saxis
        xyzt_strings = ["x, y, z, +1", "x, y, z, -1", "x, -y, z, -1", "-x, -y, z, -1"]
        # Expected global moments, one per operation above.
        transformed_magmoms = [[1, 2, 3], [-1, -2, -3], [1, -2, 3], [1, 2, -3]]
        for xyzt_string, transformed_magmom in zip(xyzt_strings, transformed_magmoms):
            for magmom in magmoms:
                op = MagSymmOp.from_xyzt_string(xyzt_string)
                self.assertTrue(np.allclose(transformed_magmom, op.operate_magmom(magmom).global_moment))
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    import unittest

    unittest.main()
| |
"""
Shortcuts for retrieving input from the user.
If you are using this library for retrieving some input from the user (as a
pure Python replacement for GNU readline), probably for 90% of the use cases,
the :func:`.prompt` function is all you need. It's the easiest shortcut which
does a lot of the underlying work like creating a
:class:`~prompt_toolkit.interface.CommandLineInterface` instance for you.
When is this not sufficient:
- When you want to have more complicated layouts (maybe with sidebars or
  multiple toolbars, or the visibility of certain user interface controls
  depending on some conditions).
- When you wish to have multiple input buffers. (If you would create an
editor like a Vi clone.)
- Something else that requires more customization than what is possible
with the parameters of `prompt`.
In that case, study the code in this file and build your own
`CommandLineInterface` instance. It's not too complicated.
"""
from __future__ import unicode_literals
from .buffer import Buffer, AcceptAction
from .document import Document
from .enums import DEFAULT_BUFFER, SEARCH_BUFFER, EditingMode
from .filters import IsDone, HasFocus, RendererHeightIsKnown, to_simple_filter, to_cli_filter, Condition
from .history import InMemoryHistory
from .interface import CommandLineInterface, Application, AbortAction
from .key_binding.manager import KeyBindingManager
from .key_binding.registry import Registry
from .keys import Keys
from .layout import Window, HSplit, FloatContainer, Float
from .layout.containers import ConditionalContainer
from .layout.controls import BufferControl, TokenListControl
from .layout.dimension import LayoutDimension
from .layout.lexers import PygmentsLexer
from .layout.margins import PromptMargin, ConditionalMargin
from .layout.menus import CompletionsMenu, MultiColumnCompletionsMenu
from .layout.processors import PasswordProcessor, ConditionalProcessor, AppendAutoSuggestion, HighlightSearchProcessor, HighlightSelectionProcessor
from .layout.prompt import DefaultPrompt
from .layout.screen import Char
from .layout.toolbars import ValidationToolbar, SystemToolbar, ArgToolbar, SearchToolbar
from .layout.utils import explode_tokens
from .renderer import print_tokens as renderer_print_tokens
from .styles import DEFAULT_STYLE, Style, style_from_dict
from .token import Token
from .utils import is_conemu_ansi, is_windows, DummyContext
from six import text_type, exec_, PY2
import os
import sys
import textwrap
import threading
import time
try:
from pygments.lexer import Lexer as pygments_Lexer
from pygments.style import Style as pygments_Style
except ImportError:
pygments_Lexer = None
pygments_Style = None
if is_windows():
from .terminal.win32_output import Win32Output
from .terminal.conemu_output import ConEmuOutput
else:
from .terminal.vt100_output import Vt100_Output
__all__ = (
'create_eventloop',
'create_output',
'create_prompt_layout',
'create_prompt_application',
'prompt',
'prompt_async',
'create_confirm_application',
'confirm',
'print_tokens',
)
def create_eventloop(inputhook=None, recognize_win32_paste=True):
    """
    Create and return an
    :class:`~prompt_toolkit.eventloop.base.EventLoop` instance suitable for
    the current platform, for use by a
    :class:`~prompt_toolkit.interface.CommandLineInterface`.
    """
    if not is_windows():
        from prompt_toolkit.eventloop.posix import PosixEventLoop
        return PosixEventLoop(inputhook=inputhook)

    from prompt_toolkit.eventloop.win32 import Win32EventLoop
    return Win32EventLoop(inputhook=inputhook,
                          recognize_paste=recognize_win32_paste)
def create_output(stdout=None, true_color=False):
    """
    Return an :class:`~prompt_toolkit.output.Output` instance for the command
    line.

    :param stdout: Stream to write to; defaults to ``sys.__stdout__``.
    :param true_color: When True, use 24bit colors instead of 256 colors.
        (`bool` or :class:`~prompt_toolkit.filters.SimpleFilter`.)
    """
    out_stream = stdout or sys.__stdout__
    use_true_color = to_simple_filter(true_color)

    if not is_windows():
        term_name = os.environ.get('TERM', '')
        if PY2:
            term_name = term_name.decode('utf-8')
        return Vt100_Output.from_pty(out_stream, true_color=use_true_color,
                                     term=term_name)

    # Windows: ConEmu understands ANSI escape sequences, the plain
    # console needs the Win32 API output.
    if is_conemu_ansi():
        return ConEmuOutput(out_stream)
    return Win32Output(out_stream)
def create_asyncio_eventloop(loop=None):
    """
    Returns an asyncio :class:`~prompt_toolkit.eventloop.EventLoop` instance
    for usage in a :class:`~prompt_toolkit.interface.CommandLineInterface`. It
    is a wrapper around an asyncio loop.

    :param loop: The asyncio eventloop (or `None` if the default asyncio loop
        should be used.)
    """
    # Import lazily so that merely importing this module keeps working on
    # Python 2, where asyncio is unavailable.
    if is_windows():
        from prompt_toolkit.eventloop.asyncio_win32 import Win32AsyncioEventLoop
        loop_cls = Win32AsyncioEventLoop
    else:
        from prompt_toolkit.eventloop.asyncio_posix import PosixAsyncioEventLoop
        loop_cls = PosixAsyncioEventLoop
    return loop_cls(loop)
def _split_multiline_prompt(get_prompt_tokens):
"""
Take a `get_prompt_tokens` function and return three new functions instead.
One that tells whether this prompt consists of multiple lines; one that
returns the tokens to be shown on the lines above the input; and another
one with the tokens to be shown at the first line of the input.
"""
def has_before_tokens(cli):
for token, char in get_prompt_tokens(cli):
if '\n' in char:
return True
return False
def before(cli):
result = []
found_nl = False
for token, char in reversed(explode_tokens(get_prompt_tokens(cli))):
if found_nl:
result.insert(0, (token, char))
elif char == '\n':
found_nl = True
return result
def first_input_line(cli):
result = []
for token, char in reversed(explode_tokens(get_prompt_tokens(cli))):
if char == '\n':
break
else:
result.insert(0, (token, char))
return result
return has_before_tokens, before, first_input_line
class _RPrompt(Window):
    " Prompt displayed at the right side of the window. "
    def __init__(self, get_tokens=None):
        if not get_tokens:
            # Default to an empty right prompt.
            get_tokens = lambda cli: []
        super(_RPrompt, self).__init__(
            TokenListControl(get_tokens, align_right=True))
def create_prompt_layout(message='', lexer=None, is_password=False,
                         reserve_space_for_menu=8,
                         get_prompt_tokens=None, get_continuation_tokens=None,
                         get_rprompt_tokens=None,
                         get_bottom_toolbar_tokens=None,
                         display_completions_in_columns=False,
                         extra_input_processors=None, multiline=False,
                         wrap_lines=True):
    """
    Create a :class:`.Container` instance for a prompt.

    :param message: Text to be used as prompt.
    :param lexer: :class:`~prompt_toolkit.layout.lexers.Lexer` to be used for
        the highlighting.
    :param is_password: `bool` or :class:`~prompt_toolkit.filters.CLIFilter`.
        When True, display input as '*'.
    :param reserve_space_for_menu: Space to be reserved for the menu. When >0,
        make sure that a minimal height is allocated in the terminal, in order
        to display the completion menu.
    :param get_prompt_tokens: An optional callable that returns the tokens to be
        shown in the menu. (To be used instead of a `message`.)
    :param get_continuation_tokens: An optional callable that takes a
        CommandLineInterface and width as input and returns a list of (Token,
        text) tuples to be used for the continuation.
    :param get_rprompt_tokens: An optional callable that returns the tokens
        for the prompt displayed at the right side of the window.
    :param get_bottom_toolbar_tokens: An optional callable that returns the
        tokens for a toolbar at the bottom.
    :param display_completions_in_columns: `bool` or
        :class:`~prompt_toolkit.filters.CLIFilter`. Display the completions in
        multiple columns.
    :param multiline: `bool` or :class:`~prompt_toolkit.filters.CLIFilter`.
        When True, prefer a layout that is more adapted for multiline input.
        Text after newlines is automatically indented, and search/arg input is
        shown below the input, instead of replacing the prompt.
    :param wrap_lines: `bool` or :class:`~prompt_toolkit.filters.CLIFilter`.
        When True (the default), automatically wrap long lines instead of
        scrolling horizontally.
    """
    assert isinstance(message, text_type), 'Please provide a unicode string.'
    assert get_bottom_toolbar_tokens is None or callable(get_bottom_toolbar_tokens)
    assert get_prompt_tokens is None or callable(get_prompt_tokens)
    assert get_rprompt_tokens is None or callable(get_rprompt_tokens)
    # `message` and `get_prompt_tokens` are mutually exclusive.
    assert not (message and get_prompt_tokens)

    display_completions_in_columns = to_cli_filter(display_completions_in_columns)
    multiline = to_cli_filter(multiline)

    if get_prompt_tokens is None:
        get_prompt_tokens = lambda _: [(Token.Prompt, message)]

    has_before_tokens, get_prompt_tokens_1, get_prompt_tokens_2 = \
        _split_multiline_prompt(get_prompt_tokens)

    # `lexer` is supposed to be a `Lexer` instance. But if a Pygments lexer
    # class is given, turn it into a PygmentsLexer. (Important for
    # backwards-compatibility.)
    try:
        if pygments_Lexer and issubclass(lexer, pygments_Lexer):
            lexer = PygmentsLexer(lexer, sync_from_start=True)
    except TypeError:  # Happens when lexer is `None` or an instance of something else.
        pass

    # Create processors list.
    input_processors = [
        ConditionalProcessor(
            # By default, only highlight search when the search
            # input has the focus. (Note that this doesn't mean
            # there is no search: the Vi 'n' binding for instance
            # still allows to jump to the next match in
            # navigation mode.)
            HighlightSearchProcessor(preview_search=True),
            HasFocus(SEARCH_BUFFER)),
        HighlightSelectionProcessor(),
        ConditionalProcessor(AppendAutoSuggestion(), HasFocus(DEFAULT_BUFFER) & ~IsDone()),
        ConditionalProcessor(PasswordProcessor(), is_password)
    ]

    if extra_input_processors:
        input_processors.extend(extra_input_processors)

    # Show the prompt before the input (using the DefaultPrompt processor).
    # This also replaces it with reverse-i-search and 'arg' when required.
    # (Only for single line mode.)
    # (DefaultPrompt should always be at the end of the processors.)
    input_processors.append(ConditionalProcessor(
        DefaultPrompt(get_prompt_tokens_2), ~multiline))

    # Create bottom toolbar.
    if get_bottom_toolbar_tokens:
        toolbars = [ConditionalContainer(
            Window(TokenListControl(get_bottom_toolbar_tokens,
                                    default_char=Char(' ', Token.Toolbar)),
                   height=LayoutDimension.exact(1)),
            filter=~IsDone() & RendererHeightIsKnown())]
    else:
        toolbars = []

    def get_height(cli):
        # If there is an autocompletion menu to be shown, make sure that our
        # layout has at least a minimal height in order to display it.
        if reserve_space_for_menu and not cli.is_done:
            buff = cli.current_buffer
            # Reserve the space, either when there are completions, or when
            # `complete_while_typing` is true and we expect completions very
            # soon.
            if buff.complete_while_typing(cli) or buff.complete_state is not None:
                return LayoutDimension(min=reserve_space_for_menu)
        return LayoutDimension()

    # Create and return Container instance.
    return HSplit([
        # The main input, with completion menus floating on top of it.
        FloatContainer(
            HSplit([
                # Lines of the prompt that come before the input line
                # (only shown when the prompt contains newlines).
                ConditionalContainer(
                    Window(
                        TokenListControl(get_prompt_tokens_1),
                        dont_extend_height=True),
                    Condition(has_before_tokens)
                ),
                Window(
                    BufferControl(
                        input_processors=input_processors,
                        lexer=lexer,
                        # Enable preview_search, we want to have immediate feedback
                        # in reverse-i-search mode.
                        preview_search=True),
                    get_height=get_height,
                    left_margins=[
                        # In multiline mode, use the window margin to display
                        # the prompt and continuation tokens.
                        ConditionalMargin(
                            PromptMargin(get_prompt_tokens_2, get_continuation_tokens),
                            filter=multiline
                        )
                    ],
                    wrap_lines=wrap_lines,
                ),
            ]),
            [
                # Completion menus.
                Float(xcursor=True,
                      ycursor=True,
                      content=CompletionsMenu(
                          max_height=16,
                          scroll_offset=1,
                          extra_filter=HasFocus(DEFAULT_BUFFER) &
                          ~display_completions_in_columns)),
                Float(xcursor=True,
                      ycursor=True,
                      content=MultiColumnCompletionsMenu(
                          extra_filter=HasFocus(DEFAULT_BUFFER) &
                          display_completions_in_columns,
                          show_meta=True)),
                # The right prompt.
                Float(right=0, top=0, hide_when_covering_content=True,
                      content=_RPrompt(get_rprompt_tokens)),
            ]
        ),
        ValidationToolbar(),
        SystemToolbar(),
        # In multiline mode, we use two toolbars for 'arg' and 'search'.
        ConditionalContainer(ArgToolbar(), multiline),
        ConditionalContainer(SearchToolbar(), multiline),
    ] + toolbars)
def create_prompt_application(
        message='',
        multiline=False,
        wrap_lines=True,
        is_password=False,
        vi_mode=False,
        editing_mode=EditingMode.EMACS,
        complete_while_typing=True,
        enable_history_search=False,
        lexer=None,
        enable_system_bindings=False,
        enable_open_in_editor=False,
        validator=None,
        completer=None,
        reserve_space_for_menu=8,
        auto_suggest=None,
        style=None,
        history=None,
        clipboard=None,
        get_prompt_tokens=None,
        get_continuation_tokens=None,
        get_rprompt_tokens=None,
        get_bottom_toolbar_tokens=None,
        display_completions_in_columns=False,
        get_title=None,
        mouse_support=False,
        extra_input_processors=None,
        key_bindings_registry=None,
        on_abort=AbortAction.RAISE_EXCEPTION,
        on_exit=AbortAction.RAISE_EXCEPTION,
        accept_action=AcceptAction.RETURN_DOCUMENT,
        erase_when_done=False,
        default=''):
    """
    Create an :class:`~Application` instance for a prompt.

    (It is meant to cover 90% of the prompt use cases, where no extreme
    customization is required. For more complex input, it is required to
    create a custom :class:`~Application` instance.)

    :param message: Text to be shown before the prompt.
    :param multiline: Allow multiline input. Pressing enter will insert a
        newline. (This requires Meta+Enter to accept the input.)
    :param wrap_lines: `bool` or :class:`~prompt_toolkit.filters.CLIFilter`.
        When True (the default), automatically wrap long lines instead of
        scrolling horizontally.
    :param is_password: Show asterisks instead of the actual typed characters.
    :param editing_mode: ``EditingMode.VI`` or ``EditingMode.EMACS``.
    :param vi_mode: `bool`, if True, identical to ``editing_mode=EditingMode.VI``.
    :param complete_while_typing: `bool` or
        :class:`~prompt_toolkit.filters.CLIFilter`. Enable autocompletion
        while typing.
    :param enable_history_search: `bool` or
        :class:`~prompt_toolkit.filters.CLIFilter`. Enable up-arrow partial
        string matching.
    :param lexer: :class:`~prompt_toolkit.layout.lexers.Lexer` to be used for
        the syntax highlighting.
    :param validator: :class:`~prompt_toolkit.validation.Validator` instance
        for input validation.
    :param completer: :class:`~prompt_toolkit.completion.Completer` instance
        for input completion.
    :param reserve_space_for_menu: Space to be reserved for displaying the
        menu. (0 means that no space needs to be reserved.)
    :param auto_suggest: :class:`~prompt_toolkit.auto_suggest.AutoSuggest`
        instance for input suggestions.
    :param style: :class:`.Style` instance for the color scheme.
    :param enable_system_bindings: `bool` or
        :class:`~prompt_toolkit.filters.CLIFilter`. Pressing Meta+'!' will
        show a system prompt.
    :param enable_open_in_editor: `bool` or
        :class:`~prompt_toolkit.filters.CLIFilter`. Pressing 'v' in Vi mode or
        C-X C-E in emacs mode will open an external editor.
    :param history: :class:`~prompt_toolkit.history.History` instance.
    :param clipboard: :class:`~prompt_toolkit.clipboard.base.Clipboard`
        instance. (e.g.
        :class:`~prompt_toolkit.clipboard.in_memory.InMemoryClipboard`)
    :param get_bottom_toolbar_tokens: Optional callable which takes a
        :class:`~prompt_toolkit.interface.CommandLineInterface` and returns a
        list of tokens for the bottom toolbar.
    :param display_completions_in_columns: `bool` or
        :class:`~prompt_toolkit.filters.CLIFilter`. Display the completions
        in multiple columns.
    :param get_title: Callable that returns the title to be displayed in the
        terminal.
    :param mouse_support: `bool` or :class:`~prompt_toolkit.filters.CLIFilter`
        to enable mouse support.
    :param default: The default text to be shown in the input buffer. (This
        can be edited by the user.)
    """
    if key_bindings_registry is None:
        key_bindings_registry = KeyBindingManager.for_prompt(
            enable_system_bindings=enable_system_bindings,
            enable_open_in_editor=enable_open_in_editor).registry

    # Ensure backwards-compatibility, when `vi_mode` is passed.
    if vi_mode:
        editing_mode = EditingMode.VI

    # Make sure that complete_while_typing is disabled when enable_history_search
    # is enabled. (First convert to SimpleFilter, to avoid doing bitwise operations
    # on bool objects.)
    complete_while_typing = to_simple_filter(complete_while_typing)
    enable_history_search = to_simple_filter(enable_history_search)
    multiline = to_simple_filter(multiline)

    complete_while_typing = complete_while_typing & ~enable_history_search

    # Accept Pygments styles as well for backwards compatibility.
    try:
        if pygments_Style and issubclass(style, pygments_Style):
            style = style_from_dict(style.styles)
    except TypeError:  # Happens when style is `None` or an instance of something else.
        pass

    # Create application
    return Application(
        layout=create_prompt_layout(
            message=message,
            lexer=lexer,
            is_password=is_password,
            # Reserving menu space only makes sense when a completer is set.
            reserve_space_for_menu=(reserve_space_for_menu if completer is not None else 0),
            multiline=Condition(lambda cli: multiline()),
            get_prompt_tokens=get_prompt_tokens,
            get_continuation_tokens=get_continuation_tokens,
            get_rprompt_tokens=get_rprompt_tokens,
            get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,
            display_completions_in_columns=display_completions_in_columns,
            extra_input_processors=extra_input_processors,
            wrap_lines=wrap_lines),
        buffer=Buffer(
            enable_history_search=enable_history_search,
            complete_while_typing=complete_while_typing,
            is_multiline=multiline,
            history=(history or InMemoryHistory()),
            validator=validator,
            completer=completer,
            auto_suggest=auto_suggest,
            accept_action=accept_action,
            initial_document=Document(default),
        ),
        style=style or DEFAULT_STYLE,
        clipboard=clipboard,
        key_bindings_registry=key_bindings_registry,
        get_title=get_title,
        mouse_support=mouse_support,
        editing_mode=editing_mode,
        erase_when_done=erase_when_done,
        on_abort=on_abort,
        on_exit=on_exit)
def prompt(message='', **kwargs):
    """
    Get input from the user and return it.

    This is a wrapper around a lot of ``prompt_toolkit`` functionality and
    can be a replacement for `raw_input` (or GNU readline).

    If you want to keep your history across several calls, create one
    :class:`~prompt_toolkit.history.History` instance and pass it every time.

    This function accepts many keyword arguments. Except for the following,
    they are a proxy to the arguments of :func:`.create_prompt_application`.

    :param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that
        print statements from other threads won't destroy the prompt. (They
        will be printed above the prompt instead.)
    :param return_asyncio_coroutine: When True, return an asyncio coroutine. (Python >3.3)
    :param true_color: When True, use 24bit colors instead of 256 colors.
    :param refresh_interval: (number; in seconds) When given, refresh the UI
        every so many seconds.
    """
    # Split off the arguments that belong to `run_application`; everything
    # that remains goes to `create_prompt_application`.
    run_kwargs = {
        'patch_stdout': kwargs.pop('patch_stdout', False),
        'return_asyncio_coroutine': kwargs.pop('return_asyncio_coroutine', False),
        'true_color': kwargs.pop('true_color', False),
        'refresh_interval': kwargs.pop('refresh_interval', 0),
        'eventloop': kwargs.pop('eventloop', None),
    }
    application = create_prompt_application(message, **kwargs)
    return run_application(application, **run_kwargs)
def run_application(
        application, patch_stdout=False, return_asyncio_coroutine=False,
        true_color=False, refresh_interval=0, eventloop=None):
    """
    Run a prompt toolkit application.

    :param application: The :class:`~Application` instance to run.
    :param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that
        print statements from other threads won't destroy the prompt. (They
        will be printed above the prompt instead.)
    :param return_asyncio_coroutine: When True, return an asyncio coroutine. (Python >3.3)
    :param true_color: When True, use 24bit colors instead of 256 colors.
    :param refresh_interval: (number; in seconds) When given, refresh the UI
        every so many seconds.
    :param eventloop: Optional eventloop to reuse; a new one is created (and
        closed afterwards) when omitted.
    """
    assert isinstance(application, Application)

    if return_asyncio_coroutine:
        eventloop = create_asyncio_eventloop()
    else:
        eventloop = eventloop or create_eventloop()

    # Create CommandLineInterface.
    cli = CommandLineInterface(
        application=application,
        eventloop=eventloop,
        output=create_output(true_color=true_color))

    # Set up refresh interval.
    if refresh_interval:
        # Mutable flag shared with the daemon thread below; set on stop so
        # the thread exits its loop.
        done = [False]

        def start_refresh_loop(cli):
            # Daemon thread that periodically requests a redraw until the
            # interface stops.
            def run():
                while not done[0]:
                    time.sleep(refresh_interval)
                    cli.request_redraw()
            t = threading.Thread(target=run)
            t.daemon = True
            t.start()

        def stop_refresh_loop(cli):
            done[0] = True

        cli.on_start += start_refresh_loop
        cli.on_stop += stop_refresh_loop

    # Replace stdout.
    patch_context = cli.patch_stdout_context() if patch_stdout else DummyContext()

    # Read input and return it.
    if return_asyncio_coroutine:
        # Create an asyncio coroutine and call it.
        # (Built with exec_ so that this module still parses on Python 2,
        # where `yield from` is a syntax error.)
        exec_context = {'patch_context': patch_context, 'cli': cli,
                        'Document': Document}
        exec_(textwrap.dedent('''
        import asyncio

        @asyncio.coroutine
        def prompt_coro():
            with patch_context:
                result = yield from cli.run_async(reset_current_buffer=False)

                if isinstance(result, Document):  # Backwards-compatibility.
                    return result.text
                return result
        '''), exec_context)

        return exec_context['prompt_coro']()
    else:
        # Note: We pass `reset_current_buffer=False`, because that way it's easy to
        # give DEFAULT_BUFFER a default value, without it getting erased. We
        # don't have to reset anyway, because this is the first and only time
        # that this CommandLineInterface will run.
        try:
            with patch_context:
                result = cli.run(reset_current_buffer=False)

                if isinstance(result, Document):  # Backwards-compatibility.
                    return result.text
                return result
        finally:
            eventloop.close()
def prompt_async(message='', **kwargs):
    """
    Similar to :func:`.prompt`, but return an asyncio coroutine instead of
    running the event loop ourselves.
    """
    # Force the coroutine code path, overriding any caller-supplied value.
    kwargs = dict(kwargs, return_asyncio_coroutine=True)
    return prompt(message, **kwargs)
def create_confirm_application(message):
    """
    Create a confirmation `Application` that returns True/False.
    """
    registry = Registry()

    def make_handler(answer_text, return_value):
        # Echo the answer into the default buffer and finish the
        # application with the given result.
        def handler(event):
            event.cli.buffers[DEFAULT_BUFFER].text = answer_text
            event.cli.set_return_value(return_value)
        return handler

    accept = make_handler('y', True)
    reject = make_handler('n', False)

    registry.add_binding('y')(accept)
    registry.add_binding('Y')(accept)
    registry.add_binding('n')(reject)
    registry.add_binding('N')(reject)
    registry.add_binding(Keys.ControlC)(reject)

    return create_prompt_application(message, key_bindings_registry=registry)
def confirm(message='Confirm (y or n) '):
    """
    Display a confirmation prompt and return True/False.
    """
    assert isinstance(message, text_type)
    return run_application(create_confirm_application(message))
def print_tokens(tokens, style=None, true_color=False):
    """
    Print a list of (Token, text) tuples in the given style to the output.
    E.g.::

        style = style_from_dict({
            Token.Hello: '#ff0066',
            Token.World: '#884444 italic',
        })
        tokens = [
            (Token.Hello, 'Hello'),
            (Token.World, 'World'),
        ]
        print_tokens(tokens, style=style)

    :param tokens: List of ``(Token, text)`` tuples.
    :param style: :class:`.Style` instance for the color scheme.
    :param true_color: When True, use 24bit colors instead of 256 colors.
    """
    # NOTE(review): although `style` defaults to None, the assertion below
    # rejects None — callers must always pass a real Style instance. Either
    # the default or the assert looks wrong; confirm intent before changing.
    assert isinstance(style, Style)

    output = create_output(true_color=true_color)
    renderer_print_tokens(output, tokens, style)
# Deprecated alias for `prompt`.
get_input = prompt
# Deprecated alias (former name) for `create_prompt_layout`.
create_default_layout = create_prompt_layout
# Deprecated alias (former name) for `create_prompt_application`.
create_default_application = create_prompt_application
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class, branched from slim for fp16 performance study."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
    """Apply `activation_fn` to `x` when given; otherwise pass `x` through."""
    if not activation_fn:
        return x
    return activation_fn(x, name=name)
def _fixed_padding(inputs, kernel_size, rate=1):
    """Pads the input along the spatial dimensions independently of input size.

    Pads the input such that if it was used in a convolution with 'VALID'
    padding, the output would have the same dimensions as if the unpadded
    input was used in a convolution with 'SAME' padding.

    Args:
        inputs: A tensor of size [batch, height_in, width_in, channels].
        kernel_size: Sequence of ints, the kernel to be used in the conv2d or
            max_pool2d operation. A single-element sequence is treated as a
            square kernel.
        rate: An integer, rate for atrous convolution.

    Returns:
        output: A tensor of size [batch, height_out, width_out, channels] with
        the input, either intact (if kernel_size == 1) or padded
        (if kernel_size > 1).
    """
    # BUG FIX: the original derived both effective dimensions from
    # kernel_size[0], which mis-padded non-square kernels. Square kernels
    # (the common case in the conv_defs) are unaffected by this change.
    k_height = kernel_size[0]
    k_width = kernel_size[1] if len(kernel_size) > 1 else kernel_size[0]
    # Effective kernel extent once atrous "holes" are accounted for.
    kernel_size_effective = [k_height + (k_height - 1) * (rate - 1),
                             k_width + (k_width - 1) * (rate - 1)]
    # Total padding per spatial dimension, split as evenly as possible
    # between the two sides (the extra pixel goes to the end).
    pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
    pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
    pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
    padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
                                    [pad_beg[1], pad_end[1]], [0, 0]])
    return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
                     multiplier,
                     divisible_by=8,
                     min_depth=8,
                     **unused_kwargs):
    """Scale the op's 'num_outputs' in place by `multiplier`, rounded to a
    multiple of `divisible_by` with a floor of `min_depth`. No-op for ops
    without a 'num_outputs' parameter."""
    if 'num_outputs' not in output_params:
        return
    depth = output_params['num_outputs']
    output_params['num_outputs'] = _make_divisible(
        depth * multiplier, divisible_by, min_depth)
# Lightweight layer description: the layer function, its keyword params,
# and the function used to scale its depth by the model multiplier.
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])


def op(opfunc, **params):
    """Wrap a layer function and its params into an `_Op` tuple.

    The multiplier transform may be overridden with a
    `multiplier_transform` keyword; it defaults to `depth_multiplier`.

    BUG FIX: the original only recognized the misspelled key
    'multiplier_transorm', so a correctly spelled override silently leaked
    into `params`. Both spellings are now accepted (the correct one takes
    precedence), keeping existing callers working.
    """
    for key in ('multiplier_transform', 'multiplier_transorm'):
        if key in params:
            multiplier = params.pop(key)
            break
    else:
        multiplier = depth_multiplier
    return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
    """Context manager that does nothing on enter and never suppresses
    exceptions on exit."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        # Returning False propagates any exception raised in the body.
        return False
def safe_arg_scope(funcs, **kwargs):
    """Returns `slim.arg_scope` with all None arguments removed.

    Args:
        funcs: Functions to pass to `arg_scope`.
        **kwargs: Arguments to pass to `arg_scope`.

    Returns:
        arg_scope, or a no-op context manager when every argument is None.

    Note: can be useful if a None value should be interpreted as "do not
    overwrite this parameter value".
    """
    present_args = {name: value
                    for name, value in kwargs.items()
                    if value is not None}
    if not present_args:
        return NoOpScope()
    return slim.arg_scope(funcs, **present_args)
@slim.add_arg_scope
def mobilenet_base(  # pylint: disable=invalid-name
    inputs,
    conv_defs,
    multiplier=1.0,
    final_endpoint=None,
    output_stride=None,
    use_explicit_padding=False,
    scope=None,
    is_training=False):
  """Mobilenet base network.

  Constructs a network from inputs to the given final endpoint. By default
  the network is constructed in inference mode. To create network
  in training mode use:

  with slim.arg_scope(mobilenet.training_scope()):
     logits, endpoints = mobilenet_base(...)

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    conv_defs: A list of op(...) layers specifying the net architecture.
    multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    final_endpoint: The name of last layer, for early termination.
      For V1-based networks the last layer is "layer_14", for V2: "layer_20".
    output_stride: An integer that specifies the requested ratio of input to
      output spatial resolution. If not None, then we invoke atrous convolution
      if necessary to prevent the network from reducing the spatial resolution
      of the activation maps. Allowed values are 1 or any even number, excluding
      zero. Typical values are 8 (accurate fully convolutional mode), 16
      (fast fully convolutional mode), and 32 (classification mode).

      NOTE- output_stride relies on all consequent operators to support dilated
      operators via "rate" parameter. This might require wrapping non-conv
      operators to operate properly.
    use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
      inputs so that the output dimensions are the same as if 'SAME' padding
      were used.
    scope: optional variable scope.
    is_training: How to setup batch_norm and other ops. Note: most of the time
      this does not need be set directly. Use mobilenet.training_scope() to set
      up training instead. This parameter is here for backward compatibility
      only. It is safe to set it to the value matching
      training_scope(is_training=...). It is also safe to explicitly set
      it to False, even if there is outer training_scope set to to training.
      (The network will be built in inference mode). If this is set to None,
      no arg_scope is added for slim.batch_norm's is_training parameter.

  Returns:
    tensor_out: output tensor.
    end_points: a set of activations for external use, for example summaries or
                losses.

  Raises:
    ValueError: depth_multiplier <= 0, or the target output_stride is not
                allowed.
  """
  if multiplier <= 0:
    raise ValueError('multiplier is not greater than zero.')

  # Set conv defs defaults and overrides.
  conv_defs_defaults = conv_defs.get('defaults', {})
  conv_defs_overrides = conv_defs.get('overrides', {})
  if use_explicit_padding:
    # Deep-copied so the caller's conv_defs dict is not mutated.
    conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
    conv_defs_overrides[
        (slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}

  if output_stride is not None:
    if output_stride == 0 or (output_stride > 1 and output_stride % 2):
      raise ValueError('Output stride must be None, 1 or a multiple of 2.')

  # a) Set the tensorflow scope
  # b) set padding to default: note we might consider removing this
  # since it is also set by mobilenet_scope
  # c) set all defaults
  # d) set all extra overrides.
  with _scope_all(scope, default_scope='Mobilenet'), \
      safe_arg_scope([slim.batch_norm], is_training=is_training), \
      _set_arg_scope_defaults(conv_defs_defaults), \
      _set_arg_scope_defaults(conv_defs_overrides):
    # The current_stride variable keeps track of the output stride of the
    # activations, i.e., the running product of convolution strides up to the
    # current network layer. This allows us to invoke atrous convolution
    # whenever applying the next convolution would result in the activations
    # having output stride larger than the target output_stride.
    current_stride = 1

    # The atrous convolution rate parameter.
    rate = 1

    net = inputs
    # Insert default parameters before the base scope which includes
    # any custom overrides set in mobilenet.
    end_points = {}
    # Maps a tf name scope (directory part of a tensor name) back to the
    # end-point name of the layer that produced it; used below to attach
    # intermediate 'output' tensors to the right layer.
    scopes = {}
    for i, opdef in enumerate(conv_defs['spec']):
      # Copy so multiplier_func can mutate params without touching the spec.
      params = dict(opdef.params)
      opdef.multiplier_func(params, multiplier)
      stride = params.get('stride', 1)
      if output_stride is not None and current_stride == output_stride:
        # If we have reached the target output_stride, then we need to employ
        # atrous convolution with stride=1 and multiply the atrous rate by the
        # current unit's stride for use in subsequent layers.
        layer_stride = 1
        layer_rate = rate
        rate *= stride
      else:
        layer_stride = stride
        layer_rate = 1
        current_stride *= stride
      # Update params.
      params['stride'] = layer_stride
      # Only insert rate to params if rate > 1.
      if layer_rate > 1:
        params['rate'] = layer_rate
      # Set padding
      if use_explicit_padding:
        if 'kernel_size' in params:
          net = _fixed_padding(net, params['kernel_size'], layer_rate)
        else:
          params['use_explicit_padding'] = True

      end_point = 'layer_%d' % (i + 1)
      try:
        net = opdef.op(net, **params)
      except Exception:
        # Re-raised after logging which spec entry failed.
        print('Failed to create op %i: %r params: %r' % (i, opdef, params))
        raise
      end_points[end_point] = net
      scope = os.path.dirname(net.name)
      scopes[scope] = end_point
      if final_endpoint is not None and end_point == final_endpoint:
        break

    # Add all tensors that end with 'output' to
    # endpoints
    for t in net.graph.get_operations():
      scope = os.path.dirname(t.name)
      bn = os.path.basename(t.name)
      if scope in scopes and t.name.endswith('output'):
        end_points[scopes[scope] + '/' + bn] = t.outputs[0]
    return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
  """Opens `scope` as a variable scope plus its matching name scope."""
  with tf.variable_scope(scope, default_name=default_scope) as s:
    with tf.name_scope(s.original_name_scope):
      yield s
@slim.add_arg_scope
def mobilenet(inputs,
              num_classes=1001,
              prediction_fn=slim.softmax,
              reuse=None,
              scope='Mobilenet',
              base_only=False,
              **mobilenet_args):
  """Mobilenet model for classification, supports both V1 and V2.

  Note: default mode is inference, use mobilenet.training_scope to create
  training network.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    prediction_fn: a function to get predictions out of logits
      (default softmax).
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
    base_only: if True will only create the base of the network (no pooling
      and no logits).
    **mobilenet_args: passed to mobilenet_base verbatim.
      - conv_defs: list of conv defs
      - multiplier: Float multiplier for the depth (number of channels)
        for all convolution ops. The value must be greater than zero. Typical
        usage will be to set this value in (0, 1) to reduce the number of
        parameters or computation cost of the model.
      - output_stride: will ensure that the last layer has at most total
        stride. If the architecture calls for more stride than that provided
        (e.g. output_stride=16, but the architecture has 5 stride=2
        operators), it will replace output_stride with fractional
        convolutions using Atrous Convolutions.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the
      corresponding activation tensor.

  Raises:
    ValueError: Input rank is invalid.
  """
  is_training = mobilenet_args.get('is_training', False)
  input_shape = inputs.get_shape().as_list()
  if len(input_shape) != 4:
    raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))

  with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
    # Named 'input' tensor so exported graphs have a stable entry point.
    inputs = tf.identity(inputs, 'input')
    net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
    if base_only:
      return net, end_points

    net = tf.identity(net, name='embedding')

    with tf.variable_scope('Logits'):
      net = global_pool(net)
      end_points['global_pool'] = net
      if not num_classes:
        # Classifier head disabled: return pre-dropout pooled features.
        return net, end_points
      net = slim.dropout(net, scope='Dropout', is_training=is_training)
      # 1 x 1 x num_classes
      # Note: legacy scope name.
      logits = slim.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          biases_initializer=tf.zeros_initializer(),
          scope='Conv2d_1c_1x1')

      logits = tf.squeeze(logits, [1, 2])

      logits = tf.identity(logits, name='output')
    end_points['Logits'] = logits
    if prediction_fn:
      end_points['Predictions'] = prediction_fn(logits, 'Predictions')
  return logits, end_points
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
  """Pools the full spatial extent of `input_tensor` down to 1x1.

  Functionally equivalent to reduce_mean over the spatial dimensions, but
  uses an explicit pooling op, which has better support across hardware.

  Args:
    input_tensor: input tensor of shape [batch_size, height, width, depth].
    pool_op: pooling op (avg pool is default).

  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  static_shape = input_tensor.get_shape().as_list()
  if static_shape[1] is not None and static_shape[2] is not None:
    kernel_size = [1, static_shape[1], static_shape[2], 1]
  else:
    # Spatial dims unknown at graph-construction time: read them
    # dynamically from the tensor itself.
    dynamic_shape = tf.shape(input_tensor)
    kernel_size = tf.convert_to_tensor(
        [1, dynamic_shape[1], dynamic_shape[2], 1])
  output = pool_op(
      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover the static output shape, which the pool op cannot infer when
  # kernel_size is a dynamic tensor.
  output.set_shape([None, 1, 1, None])
  return output
def training_scope(is_training=True,
                   weight_decay=0.00004,
                   stddev=0.09,
                   dropout_keep_prob=0.8,
                   bn_decay=0.997):
  """Defines Mobilenet training scope.

  Usage:
     with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
       logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
     # the network created will be trainable with dropout/batch norm
     # initialized appropriately.

  Args:
    is_training: if set to False this will ensure that all customizations
      are set to non-training mode. This might be helpful for code that is
      reused across both training/evaluation, but most of the time
      training_scope with value False is not needed. If this is set to
      None, the parameter is not added to the batch_norm arg_scope.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: Standard deviation for initialization, if negative uses xavier.
    dropout_keep_prob: dropout keep probability (not set if equals to None).
    bn_decay: decay for the batch norm moving averages (not set if equals
      to None).

  Returns:
    An argument scope to use via arg_scope.
  """
  # Note: do not introduce parameters that would change the inference
  # model here (for example whether to use bias), modify conv_def instead.
  if stddev < 0:
    weight_init = slim.initializers.xavier_initializer()
  else:
    weight_init = tf.truncated_normal_initializer(stddev=stddev)
  batch_norm_params = {'decay': bn_decay, 'is_training': is_training}

  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected, slim.separable_conv2d],
      weights_initializer=weight_init,
      normalizer_fn=slim.batch_norm), \
      slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training), \
      safe_arg_scope([slim.batch_norm], **batch_norm_params), \
      safe_arg_scope([slim.dropout], is_training=is_training,
                     keep_prob=dropout_keep_prob), \
      slim.arg_scope([slim.conv2d],
                     weights_regularizer=slim.l2_regularizer(weight_decay)), \
      slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
    return s
| |
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Public API of this module: only the tree builder class is exported.
__all__ = [
    'HTML5TreeBuilder',
    ]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import (
NamespacedAttribute,
whitespace_re,
)
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
# The html5lib tree-builder base class moved between releases; import it
# from whichever location this html5lib provides and remember which API
# generation we are talking to.
try:
    # Pre-0.99999999 ("8 nines"): the base class lives in a private module.
    from html5lib.treebuilders import _base as treebuilder_base
    new_html5lib = False
except ImportError as e:
    # 0.99999999 and up: the module was made public.
    from html5lib.treebuilders import base as treebuilder_base
    new_html5lib = True
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""

    NAME = "html5lib"
    # Capabilities advertised to BeautifulSoup's builder registry.
    features = [NAME, PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding,
                       document_declared_encoding=None, exclude_encodings=None):
        """Yield a single (markup, encoding, declared encoding, retry)
        4-tuple; html5lib performs its own encoding detection, so no
        alternative encodings are offered."""
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding

        # document_declared_encoding and exclude_encodings aren't used
        # ATM because the html5lib TreeBuilder doesn't use
        # UnicodeDammit.
        if exclude_encodings:
            warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        """Parse `markup` with html5lib, populating self.soup via the
        tree builder returned by create_treebuilder()."""
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)

        extra_kwargs = dict()
        if not isinstance(markup, str):
            # The keyword used to force an input encoding was renamed
            # between html5lib releases.
            if new_html5lib:
                extra_kwargs['override_encoding'] = self.user_specified_encoding
            else:
                extra_kwargs['encoding'] = self.user_specified_encoding
        doc = parser.parse(markup, **extra_kwargs)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, str):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            original_encoding = parser.tokenizer.stream.charEncoding[0]
            if not isinstance(original_encoding, str):
                # In 0.99999999 and up, the encoding is an html5lib
                # Encoding object. We want to use a string for compatibility
                # with other tree builders.
                original_encoding = original_encoding.name
            doc.original_encoding = original_encoding

    def create_treebuilder(self, namespaceHTMLElements):
        # Callback handed to html5lib.HTMLParser; keeps a reference so
        # feed() results end up in self.soup.
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
    """html5lib TreeBuilder that writes nodes directly into a
    BeautifulSoup tree via the Element/TextNode adapters below."""

    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        # New document: clear the soup and wrap it as the root node.
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # NOTE(review): `BeautifulSoup` is not imported in this module, so
        # this method would raise NameError if html5lib ever invoked it.
        # It appears unused in practice -- confirm before relying on it.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        # Unwrap the Element adapter back to the underlying soup object.
        return treebuilder_base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like adapter exposing a tag's attributes to html5lib."""

    def __init__(self, element):
        self.element = element
        # Snapshot of the element's attributes; reads go through this
        # copy, writes go through to the element itself.
        self.attrs = dict(self.element.attrs)

    def __iter__(self):
        return iter(list(self.attrs.items()))

    def __setitem__(self, name, value):
        # Whitespace-separated multi-valued attributes (either global,
        # or specific to this tag name) are stored as token lists.
        list_attr = HTML5TreeBuilder.cdata_list_attributes
        is_multi_valued = (
            name in list_attr['*']
            or (self.element.name in list_attr
                and name in list_attr[self.element.name]))
        if is_multi_valued and not isinstance(value, list):
            # A node that is being cloned may already hold a list.
            value = whitespace_re.split(value)
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(treebuilder_base.Node):
    """Adapter between an html5lib Node and a Beautiful Soup Tag."""

    def __init__(self, element, soup, namespace):
        treebuilder_base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def appendChild(self, node):
        """Append `node` (an Element, TextNode, Tag or bare string) to
        this element, merging adjacent strings."""
        string_child = child = None
        if isinstance(node, str):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
        else:
            child = node.element

        if not isinstance(child, str) and child.parent is not None:
            # Detach the child from its current location first.
            node.element.extract()

        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, str):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)

            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            elif self.element.next_element is not None:
                # Something from further ahead in the parse tree is
                # being inserted into this earlier element. This is
                # very annoying because it means an expensive search
                # for the last element in the tree.
                most_recent_element = self.soup._last_descendant()
            else:
                most_recent_element = self.element

            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            # Convert (namespace, name) tuple keys into
            # NamespacedAttribute keys.
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value

            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in list(attributes.items()):
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        """Insert string `data`, optionally before the node `insertBefore`."""
        if insertBefore:
            text = TextNode(self.soup.new_string(data), self.soup)
            # Fix: insert the TextNode wrapper, not the raw string.
            # insertBefore() reads `.element` off its first argument, so
            # passing `data` (a str) raised AttributeError.
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(data)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild) -- merge adjacent strings.
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling

        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element

        to_append = element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            if new_parents_last_descendant:
                first_child.previous_element = new_parents_last_descendant
            else:
                first_child.previous_element = new_parent_element
            first_child.previous_sibling = new_parents_last_child
            if new_parents_last_descendant:
                new_parents_last_descendant.next_element = first_child
            else:
                new_parent_element.next_element = first_child
            if new_parents_last_child:
                new_parents_last_child.next_sibling = first_child

            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            if new_parents_last_descendant_next_element:
                new_parents_last_descendant_next_element.previous_element = last_child
            last_child.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)

        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element

    def cloneNode(self):
        """Return a shallow copy of this element (tag plus attributes)."""
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        # Unnamespaced tags are treated as HTML, per html5lib convention.
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Element adapter for a bare string (NavigableString or Comment)."""

    def __init__(self, element, soup):
        # Text nodes have no tag name, hence None.
        treebuilder_base.Node.__init__(self, None)
        self.element = element
        self.soup = soup

    def cloneNode(self):
        # html5lib does not clone text nodes through this adapter.
        raise NotImplementedError
| |
#!/usr/bin/env python
from siconos.mechanics.collision.tools import Volume, Contactor, Shape
from siconos.io.mechanics_io import Hdf5
import siconos.io.mechanics_io
# Use the OpenCascade ('occ') backend so CAD (STEP) shapes can be used.
siconos.io.mechanics_io.set_backend('occ')

# Slider-crank mechanism geometry (metres) and initial speeds (rad/s).
l1 = 0.153   # crank length
l2 = 0.306   # connecting rod length
a = 0.05     # half length of the slider
b = 0.025    # half height of the slider
c = 0.001    # clearance between slider and guide
w10 = -150.  # initial angular speed for the crank
w20 = 75.    # initial angular speed for the connecting rod
w30 = 0.     # initial angular speed for the slider
with Hdf5() as io:
    # External C++ plugin providing the force and boundary-condition
    # callbacks referenced at the bottom of this block.
    io.addPluginSource('plugin', 'SliderCrankPlugin/SliderCrankPlugin.cpp')

    # Load every CAD shape used by the mechanism (order preserved).
    cad_dir = '../Mechanisms/SliderCrank/CAD/'
    for shape_name, cad_file in (
            ('body1', 'body1.step'),
            ('body2', 'body2.step'),
            ('Slider', 'Slider.step'),
            ('Contact_b_cyl', 'contact_b_cyl.step'),
            ('Contact_h_cyl', 'contact_h_cyl.step'),
            ('RingBody', 'RingBody1.stp'),
            ('Chamber', 'chamber.step'),
            ('AxisBody', 'AxisBody2.stp'),
            ('Artefact', 'artefact2.step')):
        io.addShapeDataFromFile(shape_name, cad_dir + cad_file)

    # Purely visual reference geometry.
    io.addObject('Artefact', [Shape(shape_name='Artefact',
                                    instance_name='artefact')],
                 translation=[0., 0., 0.])

    # Crank, rotating about the origin.
    io.addObject('part1', [Volume(shape_name='body1',
                                  instance_name='Body1',
                                  relative_translation=[-0.5 * l1, 0., 0.],
                                  relative_orientation=[(0, 1, 0), 0.])],
                 translation=[0.5 * l1, 0., 0.],
                 velocity=[0., 0., -0.5 * w10 * l1, 0., w10, 0.],
                 mass=0.038,
                 inertia=[7.4e-5, 1, 1.])

    # Connecting rod between crank and slider.
    io.addObject('part2', [Volume(shape_name='body2',
                                  instance_name='Body2',
                                  relative_translation=[-0.5 * l2, 0., 0.])],
                 translation=[l1 + 0.5 * l2, 0., 0.],
                 orientation=[0., 0., 1., 0.],
                 velocity=[0., 0., -0.5 * w10 * l1, 0., w20, 0.],
                 mass=0.038,
                 inertia=[5.9e-4, 1., 1.])

    # Slider body plus its eight contactors: for both contact cylinders
    # ('b' and 'h'), faces then edges, index 1 before index 0 -- the same
    # declaration order as the original hand-written list.
    slider_contactors = [Shape(shape_name='Slider',
                               instance_name='cslid',
                               relative_translation=[-a, 0., 0.])]
    for contact_type, type_letter in (('Face', 'f'), ('Edge', 'e')):
        for index in (1, 0):
            for side in ('b', 'h'):
                slider_contactors.append(Contactor(
                    instance_name='Contact_%s_%s%d' % (side, type_letter,
                                                       index),
                    shape_name='Contact_%s_cyl' % side,
                    contact_type=contact_type,
                    contact_index=index,
                    relative_translation=[-a, 0., 0.]))
    io.addObject('slider', slider_contactors,
                 translation=[l1 + l2 + a, 0., 0.],
                 velocity=[-0., 0., 0., 0., w30, 0.],
                 mass=0.076,
                 inertia=[2.7e-6, 1., 1.])

    # A static object (mass=0): the chamber/guide around the slider.
    io.addObject('chamber',
                 [Contactor(instance_name='Chamber_contact%d' % index,
                            shape_name='Chamber',
                            contact_type='Face',
                            contact_index=index,
                            relative_translation=[0, 0, 0])
                  for index in (0, 1)],
                 translation=[0, 0, 0])

    # Pivot joints: crank-to-ground, rod-to-slider, crank-to-rod.
    io.addJoint('joint1', 'part1',
                points=[[0., 0., 0.]],
                axes=[[0., 1., 0.]],
                joint_class='PivotJointR',
                absolute=True)
    io.addJoint('joint2', 'part2', 'slider',
                points=[[l1 + l2, 0., 0.]],
                axes=[[0., 1., 0]],
                joint_class='PivotJointR',
                absolute=True)
    io.addJoint('joint3', 'part1', 'part2',
                points=[[l1, 0., 0.]],
                axes=[[0., 1., 0.]],
                joint_class='PivotJointR',
                absolute=True)

    # Contact interactions: each slider contactor against both chamber
    # faces. Names follow the original scheme contact<n><j>, where n
    # numbers the slider contactor (1..8) and j the chamber face (0/1).
    slider_contactor_names = ('Contact_b_f0', 'Contact_h_f0',
                              'Contact_b_f1', 'Contact_h_f1',
                              'Contact_b_e0', 'Contact_h_e0',
                              'Contact_b_e1', 'Contact_h_e1')
    for number, contactor1 in enumerate(slider_contactor_names, start=1):
        for chamber_index in (0, 1):
            io.addInteraction(
                'contact%d%d' % (number, chamber_index),
                body1_name='slider',
                contactor1_name=contactor1,
                body2_name='chamber',
                contactor2_name='Chamber_contact%d' % chamber_index,
                distance_calculator='cadmbtb',
                offset=0.024)

    # Plugin-provided external forces and prescribed velocity.
    io.addExternalFunction('f1', 'part1', 'setComputeFExtFunction',
                           'SliderCrankPlugin', 'externalForcesB1')
    io.addExternalFunction('f2', 'part2', 'setComputeFExtFunction',
                           'SliderCrankPlugin', 'externalForcesB2')
    io.addExternalFunction('f3', 'slider', 'setComputeFExtFunction',
                           'SliderCrankPlugin', 'externalForcesS')
    io.addExternalBCFunction('fbc', 'part1', [4],
                             'SliderCrankPlugin', 'prescribedvelocityB1')

    # Newton impact-friction law shared by all contacts.
    io.addNewtonImpactFrictionNSL('contact', mu=0.3, e=0.4)
# Reopen the file written above and run the time-stepping simulation.
with Hdf5(mode='r+') as io:
    run_options = dict(with_timer=True,
                       t0=0,
                       T=1,
                       h=0.0005,
                       Newton_max_iter=5)
    io.run(**run_options)
| |
import base64
import functools
import logging
from collections import namedtuple
from django.conf import settings
from django.contrib.auth import login
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.utils.http import is_safe_url
from django.utils.html import format_html
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.views import APIView
from tower import ugettext_lazy as _
from waffle.decorators import waffle_switch
from olympia.amo import messages
from olympia.amo.utils import urlparams
from olympia.api.jwt_auth.views import JWTProtectedView
from olympia.users.models import UserProfile
from olympia.accounts.serializers import (
AccountSourceSerializer, UserProfileSerializer)
from . import verify
log = logging.getLogger('accounts')

# Stand-in object exposing only a `source` attribute ('fxa'), for code
# paths that need a user-like object but no real UserProfile.
STUB_FXA_USER = namedtuple('FxAUser', ['source'])('fxa')

# Identifiers for the ways an FxA login attempt can fail.
ERROR_NO_CODE = 'no-code'
ERROR_NO_PROFILE = 'no-profile'
ERROR_NO_USER = 'no-user'
ERROR_STATE_MISMATCH = 'state-mismatch'
ERROR_USER_MISMATCH = 'user-mismatch'
ERROR_USER_MIGRATED = 'user-migrated'

# HTTP status code returned for each error in JSON (API) responses.
ERROR_STATUSES = {
    ERROR_NO_CODE: 422,
    ERROR_NO_PROFILE: 401,
    ERROR_STATE_MISMATCH: 400,
    ERROR_USER_MISMATCH: 422,
    ERROR_USER_MIGRATED: 422,
}

# User-facing messages flashed for each error in HTML views.
LOGIN_ERROR_MESSAGES = {
    ERROR_NO_CODE:
        _(u'Your log in attempt could not be parsed. Please try again.'),
    ERROR_NO_PROFILE:
        _(u'Your Firefox Account could not be found. Please try again.'),
    ERROR_STATE_MISMATCH: _(u'You could not be logged in. Please try again.'),
    ERROR_USER_MIGRATED:
        _(u'Your account has already been migrated to Firefox Accounts.'),
    ERROR_USER_MISMATCH:
        _(u'Your Firefox Account already exists on this site.'),
}
def safe_redirect(url, action):
    """Redirect to `url` if it is safe; otherwise fall back to home."""
    target = url if is_safe_url(url) else reverse('home')
    log.info('Redirecting after {} to: {}'.format(action, target))
    return HttpResponseRedirect(target)
def find_user(identity):
    """Try to find a user for a Firefox Accounts profile.

    Accounts that have not been migrated can only be matched by email,
    migrated ones by FxA uid, so match on either. Returns None when no
    user matches. Multiple matches mean two accounts would need to be
    merged; that behaviour is undefined, so the exception is logged and
    re-raised.
    """
    lookup = Q(fxa_id=identity['uid']) | Q(email=identity['email'])
    try:
        return UserProfile.objects.get(lookup)
    except UserProfile.DoesNotExist:
        return None
    except UserProfile.MultipleObjectsReturned:
        # This shouldn't happen, so let it raise.
        log.error(
            'Found multiple users for {email} and {uid}'.format(**identity))
        raise
def register_user(request, identity):
    """Create a UserProfile from an FxA identity and log it in."""
    email, uid = identity['email'], identity['uid']
    user = UserProfile.objects.create_user(
        email=email, username=None, fxa_id=uid)
    log.info('Created user {} from FxA'.format(user))
    login(request, user)
    return user
def login_user(request, user, identity):
    """Log ``user`` in, syncing email/fxa_id from the FxA ``identity``.

    Shows a one-time "migrated" success message the first time this
    account logs in through Firefox Accounts.
    """
    if (user.fxa_id != identity['uid'] or
            user.email != identity['email']):
        log.info(
            'Updating user info from FxA for {pk}. Old {old_email} {old_uid} '
            'New {new_email} {new_uid}'.format(
                pk=user.pk, old_email=user.email, old_uid=user.fxa_id,
                new_email=identity['email'], new_uid=identity['uid']))
    if not user.fxa_migrated():
        # First FxA login for this account: surface the migration notice.
        messages.success(
            request,
            _(u'Great job!'),
            _(u'You can now log in to Add-ons with your Firefox Account.'),
            extra_tags='fxa')
    # NOTE(review): update() runs even when nothing changed — presumably
    # an idempotent write; confirm this is intended.
    user.update(fxa_id=identity['uid'], email=identity['email'])
    log.info('Logging in user {} from FxA'.format(user))
    login(request, user)
def fxa_error_message(message):
    """Append a 'Need help?' support link to an FxA error message."""
    help_url = (
        'https://support.mozilla.org/kb/access-your-add-ons-firefox-accounts')
    return format_html(
        u'{error} <a href="{url}" target="_blank">{help_text}</a>',
        error=message, url=help_url, help_text=_(u'Need help?'))
def render_error(request, error, next_path=None, format=None):
    """Render a login ``error`` either as JSON or as a redirect + message.

    For ``format == 'json'`` the error id is returned with the status from
    ERROR_STATUSES; otherwise a flash message is queued and the user is
    redirected to the login (or migrate, when authenticated) page.
    """
    if format == 'json':
        status = ERROR_STATUSES.get(error, 422)
        return Response({'error': error}, status=status)
    else:
        # Never propagate an unsafe redirect target.
        if not is_safe_url(next_path):
            next_path = None
        messages.error(
            request, fxa_error_message(LOGIN_ERROR_MESSAGES[error]),
            extra_tags='fxa')
        if request.user.is_authenticated():
            redirect_view = 'users.migrate'
        else:
            redirect_view = 'users.login'
        return HttpResponseRedirect(
            urlparams(reverse(redirect_view), to=next_path))
def parse_next_path(state_parts):
    """Decode the optional redirect path carried through the OAuth state.

    ``state_parts`` is ``state.split(':', 1)``; when a second element is
    present it is a urlsafe-base64 encoded path. Returns the decoded path
    or None when it is absent, undecodable, or unsafe to redirect to.
    """
    next_path = None
    if len(state_parts) == 2:
        # The = signs will be stripped off so we need to add them back
        # but it only cares if there are too few so add 4 of them.
        encoded_path = state_parts[1] + '===='
        try:
            next_path = base64.urlsafe_b64decode(str(encoded_path))
        except TypeError:
            # Undecodable: log and fall through with next_path = None.
            # (Removed a redundant `pass` that followed this log call.)
            log.info('Error decoding next_path {}'.format(
                encoded_path))
    if not is_safe_url(next_path):
        next_path = None
    return next_path
def with_user(format):
    """Decorator for FxA OAuth callback views.

    Validates the ``state``/``code`` request data, resolves the FxA
    identity and the matching local user, then calls the wrapped view as
    ``fn(self, request, user=..., identity=..., next_path=...)``.
    On any failure it returns an error response rendered in ``format``
    ('json' or 'html').
    """
    def outer(fn):
        @functools.wraps(fn)
        def inner(self, request):
            data = request.GET if request.method == 'GET' else request.DATA
            state_parts = data.get('state', '').split(':', 1)
            state = state_parts[0]
            next_path = parse_next_path(state_parts)
            if 'code' not in data:
                log.info('No code provided.')
                return render_error(
                    request, ERROR_NO_CODE, next_path=next_path, format=format)
            elif (not request.session.get('fxa_state') or
                    request.session['fxa_state'] != state):
                # CSRF protection: state must match what was stashed in the
                # session when the login flow started.
                log.info(
                    'State mismatch. URL: {url} Session: {session}'.format(
                        url=data.get('state'),
                        session=request.session.get('fxa_state'),
                    ))
                return render_error(
                    request, ERROR_STATE_MISMATCH, next_path=next_path,
                    format=format)
            try:
                identity = verify.fxa_identify(
                    data['code'], config=settings.FXA_CONFIG)
            except verify.IdentificationError:
                log.info('Profile not found. Code: {}'.format(data['code']))
                return render_error(
                    request, ERROR_NO_PROFILE, next_path=next_path,
                    format=format)
            else:
                identity_user = find_user(identity)
                if request.user.is_authenticated():
                    # Logged-in flow: the FxA identity must either be new
                    # or already belong to this same user.
                    if (identity_user is not None
                            and identity_user != request.user):
                        log.info('Conflict finding user during FxA login. '
                                 'request.user: {}, identity_user: {}'.format(
                                     request.user.pk, identity_user.pk))
                        return render_error(
                            request, ERROR_USER_MISMATCH, next_path=next_path,
                            format=format)
                    elif request.user.fxa_migrated():
                        log.info('User already migrated. '
                                 'request.user: {}, identity_user: {}'.format(
                                     request.user, identity_user))
                        return render_error(
                            request, ERROR_USER_MIGRATED, next_path=next_path,
                            format=format)
                    else:
                        user = request.user
                else:
                    user = identity_user
                return fn(self, request, user=user, identity=identity,
                          next_path=next_path)
        return inner
    return outer
class LoginView(APIView):
    """JSON endpoint logging an existing user in via Firefox Accounts."""

    @waffle_switch('fxa-auth')
    @with_user(format='json')
    def post(self, request, user, identity, next_path):
        # No matching account means there is nobody to log in.
        if user is None:
            return Response({'error': ERROR_NO_USER}, status=422)
        login_user(request, user, identity)
        return Response({'email': identity['email']})
class RegisterView(APIView):
    """JSON endpoint creating a new account from an FxA identity."""

    @waffle_switch('fxa-auth')
    @with_user(format='json')
    def post(self, request, user, identity, next_path):
        # An existing match means this identity is already registered.
        if user is not None:
            return Response({'error': 'That account already exists.'},
                            status=422)
        user = register_user(request, identity)
        return Response({'email': user.email})
class AuthenticateView(APIView):
    """Browser-facing FxA callback: registers or logs in, then redirects."""

    @waffle_switch('fxa-auth')
    @with_user(format='html')
    def get(self, request, user, identity, next_path):
        if user is not None:
            login_user(request, user, identity)
            return safe_redirect(next_path, 'login')
        # Brand-new identity: create the account and send them to edit
        # their freshly created profile.
        register_user(request, identity)
        return safe_redirect(reverse('users.edit'), 'register')
class ProfileView(JWTProtectedView, generics.RetrieveAPIView):
    """Return the authenticated (JWT-protected) user's own profile."""
    serializer_class = UserProfileSerializer

    def retrieve(self, request, *args, **kw):
        # The "object" is always the requesting user; no pk lookup needed.
        return Response(self.get_serializer(request.user).data)
class AccountSourceView(generics.RetrieveAPIView):
    """Report which login source (fxa or legacy) an email should use."""
    serializer_class = AccountSourceSerializer

    @waffle_switch('fxa-auth')
    def retrieve(self, request, *args, **kwargs):
        email = request.GET.get('email')
        try:
            user = UserProfile.objects.get(email=email)
        except UserProfile.DoesNotExist:
            # Use the stub FxA user with source='fxa' when the account doesn't
            # exist. This will make it more difficult to discover if an email
            # address has an account associated with it.
            user = STUB_FXA_USER
        return Response(self.get_serializer(user).data)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import random
from functools import reduce
from operator import mul
import random
from nose.tools import ok_
from common import with_seed
import mxnet
from mxnet import nd, autograd, gluon
from mxnet.test_utils import assert_almost_equal, random_arrays, rand_shape_nd, same
@with_seed()
def test_sin():
    """Check 2nd and 3rd order autograd gradients of sin."""
    sin = lambda x: nd.sin(x)
    second = lambda x: -nd.sin(x)
    third = lambda x: -nd.cos(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, sin, second)
        # TODO(kshitij12345): Remove
        check_nth_order_unary(arr, sin, [second, third], [2, 3])
@with_seed()
def test_cos():
    """Check 2nd and 3rd order autograd gradients of cos."""
    cos = lambda x: nd.cos(x)
    second = lambda x: -nd.cos(x)
    third = lambda x: nd.sin(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, cos, second)
        # TODO(kshitij12345): Remove
        check_nth_order_unary(arr, cos, [second, third], [2, 3])
@with_seed()
def test_tan():
    """Check the 2nd order gradient of tan: (tan)'' = 2*tan*sec^2."""
    tan = lambda x: nd.tan(x)
    first = lambda x: 1 / nd.cos(x)**2
    second = lambda x: 2 * tan(x) * first(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, tan, second)
@with_seed()
def test_sinh():
    """Check the 2nd order gradient of sinh: (sinh)'' = sinh."""
    sinh = lambda x: nd.sinh(x)
    second = lambda x: sinh(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, sinh, second)
@with_seed()
def test_cosh():
    """Check the 2nd order gradient of cosh: (cosh)'' = cosh."""
    cosh = lambda x: nd.cosh(x)
    second = lambda x: cosh(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, cosh, second)
@with_seed()
def test_tanh():
    """Check the 2nd order gradient of tanh: (tanh)'' = -2*tanh*sech^2."""
    tanh = lambda x: nd.tanh(x)
    first = lambda x: 1 / nd.cosh(x)**2
    second = lambda x: -2 * tanh(x) * first(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(
            arr, tanh, second, rtol=1e-6, atol=1e-6)
@with_seed()
def test_arctan():
    """Check the 2nd order gradient of arctan."""
    arctan = lambda x: nd.arctan(x)
    second = lambda x: (-2 * x)/((1 + x**2)**2)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        # Domain of arctan is all real numbers.
        # Scale std_dev
        arr *= random.randint(500, 10000)
        check_second_order_unary(arr, arctan, second)
@with_seed()
def test_arcsinh():
    """Check the 2nd order gradient of arcsinh."""
    arcsinh = lambda x: nd.arcsinh(x)
    second = lambda x: x/nd.sqrt((nd.square(x)+1)**3)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, arcsinh, second)
@with_seed()
def test_arccosh():
    """Check the 2nd order gradient of arccosh on its [1, inf) domain."""
    arccosh = lambda x: nd.arccosh(x)
    second = lambda x: x/(nd.sqrt(x-1) * nd.sqrt(x+1) * (x+1) * (x-1))
    sigma = random.randint(25, 100)
    mu = random.randint(500, 1000)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim)) * sigma + mu
        # Shifting by mu keeps every sample above 1.
        assert((arr > 1).all())
        check_second_order_unary(arr, arccosh, second)
@with_seed()
def test_arctanh():
    """Check the 2nd order gradient of arctanh."""
    arctanh = lambda x: nd.arctanh(x)
    second = lambda x: (2 * x)/((1 - x**2)**2)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, arctanh, second)
@with_seed()
def test_radians():
    """radians is linear, so its 2nd order gradient is zero."""
    radians = lambda x: nd.radians(x)
    second = lambda x: nd.zeros_like(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, radians, second)
@with_seed()
def test_relu():
    """relu is piecewise linear, so its 2nd order gradient is zero."""
    relu = lambda x: nd.relu(x)
    second = lambda x: nd.zeros_like(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, relu, second)
@with_seed()
def test_log():
    """Check 1st and 2nd order autograd gradients of log."""
    log = lambda x: nd.log(x)
    first = lambda x: 1/x
    second = lambda x: -1/(x**2)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, log, second)
        # TODO(kshitij12345): Remove
        check_nth_order_unary(arr, log, [first, second], [1, 2])
@with_seed()
def test_log2():
    """Check the 2nd order gradient of log2."""
    log2 = lambda x: nd.log2(x)
    second = lambda x: -1/((x**2) * math.log(2))
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, log2, second)
@with_seed()
def test_log10():
    """Check the 2nd order gradient of log10."""
    log10 = lambda x: nd.log10(x)
    second = lambda x: -1/((x**2) * math.log(10))
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, log10, second)
@with_seed()
def test_reciprocal():
    """Check the 2nd order gradient of 1/x, which is 2/x^3."""
    reciprocal = lambda x: nd.reciprocal(x)
    second = lambda x: 2 / x**3
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, reciprocal, second)
@with_seed()
def test_abs():
    """abs is piecewise linear, so its 2nd order gradient is zero."""
    abs_op = lambda x: nd.abs(x)
    second = lambda x: nd.zeros_like(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, abs_op, second)
@with_seed()
def test_clip():
    """clip is piecewise linear, so its 2nd order gradient is zero."""
    def clip(x):
        # Draw a fresh random clipping window on every evaluation.
        lo, hi = sorted([random.random(), random.random()])
        return nd.clip(x, lo, hi)
    second = lambda x: nd.zeros_like(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, clip, second)
@with_seed()
def test_dropout():
    """Dropout scales linearly, so its 2nd order gradient is zero."""
    dropout = lambda x: nd.Dropout(x)
    second = lambda x: nd.zeros_like(x)
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, dropout, second)
@with_seed()
def test_sigmoid():
    """Check 1st and 2nd order autograd gradients of sigmoid."""
    sigmoid = lambda x: nd.sigmoid(x)
    first = lambda x: sigmoid(x) * (1 - sigmoid(x))
    second = lambda x: first(x) * (1 - 2 * sigmoid(x))
    for ndim in range(1, 5):
        arr = random_arrays(rand_shape_nd(ndim))
        check_second_order_unary(arr, sigmoid, second)
        # TODO(kshitij12345): Remove
        check_nth_order_unary(arr, sigmoid, [first, second], [1, 2])
        # Also exercises the scalar (non-list) order API.
        check_nth_order_unary(arr, sigmoid, second, 2)
@with_seed()
def test_sqrt():
    """Check the 2nd order gradient of sqrt on positive inputs."""
    sqrt = lambda x: nd.sqrt(x)
    second = lambda x: -1/(4 * sqrt(x**3))
    sigma = random.randint(25, 100)
    mu = random.randint(500, 1000)
    for ndim in range(1, 5):
        arr = sigma * random_arrays(rand_shape_nd(ndim)) + mu
        # The shift by mu keeps every sample strictly positive.
        assert((arr > 0).all())
        check_second_order_unary(arr, sqrt, second)
@with_seed()
def test_cbrt():
    """Check the 2nd order gradient of cbrt on positive inputs."""
    cbrt = lambda x: nd.cbrt(x)
    second = lambda x: -2/(9 * cbrt(x**5))
    sigma = random.randint(25, 100)
    mu = random.randint(500, 1000)
    for ndim in range(1, 5):
        arr = sigma * random_arrays(rand_shape_nd(ndim)) + mu
        # The shift by mu keeps every sample strictly positive.
        assert((arr > 0).all())
        check_second_order_unary(arr, cbrt, second)
def check_second_order_unary(x, op, grad_grad_op, rtol=None, atol=None):
    """Convenience wrapper asserting only the 2nd order gradient of ``op``."""
    check_nth_order_unary(x, op, grad_grad_op, 2, rtol, atol)
def check_nth_order_unary(x, op, grad_ops, orders, rtol=None, atol=None):
    """Assert n-th order autograd gradient against expected gradient.

    Multiple order of gradients can be checked by passing list of
    function computing the particular order gradient and passing the
    corresponding list of order.

    Note
    ----
    1. Orders should always be monotonically increasing.
    2. Elements of grads_ops should correspond to elements of orders
    i.e. grads_op = [grad_op, grad_grad_grad_op] should be passed with
    orders = [1, 3]

    Parameters
    ----------
    x : mxnet.NDArray
        Input Array.
    op : Callable
        Operation to perform on Input Array.
    grad_ops : Callable or List of Callable
        Function to compute and assert gradient of given order.
    orders : int or List of int
        Order/s to assert expected and computed gradients.

    Returns
    -------
    None
    """
    if isinstance(orders, int):
        # Scalar API: normalize to the one-element-list form.
        orders = [orders]
        grad_ops = [grad_ops]
    assert all(i < j for i, j in zip(orders[0:-1], orders[1:])), \
        "orders should be monotonically increasing"
    assert len(set(orders)) == len(orders), \
        "orders should have unique elements"
    highest_order = max(orders)

    x = nd.array(x)
    x.attach_grad()
    # Analytic gradients for each requested order, evaluated up front.
    expected_grads = [grad_op(x) for grad_op in grad_ops]
    computed_grads = []
    head_grads = []

    # Perform compute.
    with autograd.record():
        y = op(x)
        for current_order in range(1, highest_order+1):
            # Differentiate the previous result once more, seeding each
            # pass with a fresh random head gradient.
            head_grad = nd.random.normal(shape=x.shape)
            y = autograd.grad(heads=y, variables=x, head_grads=head_grad,
                              create_graph=True, retain_graph=True)[0]
            if current_order in orders:
                computed_grads.append(y)
            head_grads.append(head_grad)

    # Validate all the gradients.
    for order, grad, computed_grad in \
            zip(orders, expected_grads, computed_grads):
        # Compute expected values: chain rule accumulates every head
        # gradient applied up to this order onto the analytic gradient.
        expected_grad = grad.asnumpy()
        for head_grad in head_grads[:order]:
            expected_grad *= head_grad.asnumpy()
        assert_almost_equal(
            expected_grad, computed_grad.asnumpy(), rtol=rtol, atol=atol)
def arange_shape_like(y):
    """Return an NDArray shaped like ``y`` filled with 0..n-1."""
    total = reduce(mul, y.shape)
    return nd.arange(total).reshape(y.shape)
class NDArrayGenerator(object):
    """Iterator yielding arange-filled NDArrays of increasing rank."""

    def __init__(self, dim, startdim=1):
        self.dim = dim          # highest rank to yield (inclusive)
        self.curdim = startdim  # rank of the next array to yield

    def __iter__(self):
        return self

    @staticmethod
    def gen(dimensions):
        # Random shape of the given rank (each axis at most 4), filled
        # with a deterministic 0..n-1 ramp.
        shape = rand_shape_nd(dimensions, 4)
        nelems = reduce(mul, shape)
        x = nd.arange(nelems).reshape(shape)
        return x

    def next(self):
        # Python 2 iterator protocol; delegates to __next__.
        return self.__next__()

    def __next__(self):
        if self.curdim > self.dim:
            raise StopIteration
        x = NDArrayGenerator.gen(self.curdim)
        self.curdim += 1
        return x
def flatten2d_right(x):
    """Collapse every axis after the first into one (right flatten)."""
    lead = x.shape[0]
    rest = reduce(mul, x.shape[1:])
    return x.reshape((lead, rest))
def flatten2d_left(x):
    """Collapse every axis before the last into one (left flatten)."""
    lead = reduce(mul, x.shape[:-1])
    return x.reshape((lead, x.shape[-1]))
@with_seed()
def test_dense_backward_flatten():
    """Second order gradients of gluon Dense (flatten=True).

    For y = x * w^T + b, checks d(x_grad)/dw and d(w_grad)/dx against
    closed-form expressions built from the supplied head gradients.
    """
    print("2nd order gradient for Fully Connected, flatten=True")
    for x in NDArrayGenerator(4, 2):
        hidden = random.randrange(1, 4)
        net = gluon.nn.Sequential()
        with net.name_scope():
            net.add(gluon.nn.Dense(hidden, flatten=True))
        # Constant init makes the expected values deterministic.
        net.initialize(mxnet.initializer.Constant(.5))
        x.attach_grad()
        with autograd.record():
            y = net.forward(x)
            o_y = arange_shape_like(y)  # head gradient of y
            params = [p.data() for p in net.collect_params().values()]
            w = params[0]
            b = params[1]
            print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
            x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
                                   create_graph=True, retain_graph=True)[0]
            o_x_grad = arange_shape_like(x_grad)
            w_grad_grad = autograd.grad(heads=x_grad, variables=w,
                                        head_grads=o_x_grad, create_graph=False)[0]
            w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
                                   create_graph=True, retain_graph=True)[0]
            o_w_grad = arange_shape_like(w_grad)
            x_grad_grad = autograd.grad(heads=w_grad, variables=x,
                                        head_grads=o_w_grad, create_graph=False)[0]
        # Expected results
        w_grad_e = nd.dot(o_y, x, transpose_a=True)
        w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
        x_grad_e = nd.dot(o_y, w)
        x_grad_grad_e = nd.dot(o_y, o_w_grad)
        ok_(w_grad.shape == w.shape)
        ok_(w_grad_grad.shape == w.shape)
        ok_(x_grad.shape == x.shape)
        ok_(x_grad_grad.shape == x.shape)
        # Flatten to 2-D before comparing so ranks line up.
        w_grad_check = same(flatten2d_right(w_grad), flatten2d_right(w_grad_e))
        w_grad_grad_check = same(flatten2d_right(w_grad_grad), flatten2d_right(w_grad_grad_e))
        x_grad_check = same(flatten2d_right(x_grad), flatten2d_right(x_grad_e))
        x_grad_grad_check = same(flatten2d_right(x_grad_grad), flatten2d_right(x_grad_grad_e))
        ok_(x_grad_check)
        ok_(w_grad_check)
        ok_(x_grad_grad_check)
        ok_(w_grad_grad_check)
@with_seed()
def test_dense_backward_no_flatten():
    """Second order gradients of gluon Dense (flatten=False).

    Same checks as the flatten=True variant, but inputs keep their
    leading axes, so operands are left-flattened to 2-D before the
    expected dot products are formed.
    """
    print("2nd order gradient for Fully Connected, flatten=False")
    for x in NDArrayGenerator(5, 3):
        hidden = random.randrange(1, 4)
        net = gluon.nn.Sequential()
        with net.name_scope():
            net.add(gluon.nn.Dense(hidden, flatten=False))
        # Constant init makes the expected values deterministic.
        net.initialize(mxnet.initializer.Constant(.5))
        x.attach_grad()
        with autograd.record():
            y = net.forward(x)
            o_y = arange_shape_like(y)  # head gradient of y
            params = [p.data() for p in net.collect_params().values()]
            w = params[0]
            b = params[1]
            print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
            x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
                                   create_graph=True, retain_graph=True)[0]
            o_x_grad = arange_shape_like(x_grad)
            w_grad_grad = autograd.grad(heads=x_grad, variables=w,
                                        head_grads=o_x_grad, create_graph=False)[0]
            w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
                                   create_graph=True, retain_graph=True)[0]
            o_w_grad = arange_shape_like(w_grad)
            x_grad_grad = autograd.grad(heads=w_grad, variables=x,
                                        head_grads=o_w_grad, create_graph=False)[0]
        # Expected results
        o_y = flatten2d_left(o_y)
        x = flatten2d_left(x)
        o_x_grad = flatten2d_left(o_x_grad)
        o_w_grad = flatten2d_left(o_w_grad)
        w_grad_e = nd.dot(o_y, x, transpose_a=True)
        w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
        x_grad_e = nd.dot(o_y, w)
        x_grad_grad_e = nd.dot(o_y, o_w_grad)
        w_grad_check = same(flatten2d_left(w_grad), flatten2d_left(w_grad_e))
        w_grad_grad_check = same(flatten2d_left(w_grad_grad), flatten2d_left(w_grad_grad_e))
        x_grad_check = same(flatten2d_left(x_grad), flatten2d_left(x_grad_e))
        x_grad_grad_check = same(flatten2d_left(x_grad_grad), flatten2d_left(x_grad_grad_e))
        ok_(x_grad_check)
        ok_(w_grad_check)
        ok_(x_grad_grad_check)
        ok_(w_grad_grad_check)
if __name__ == '__main__':
    # Allow running this test module directly via nose.
    import nose
    nose.runmodule()
| |
import logging, copy, pickle
from weakref import ref
from ..returnvalues import ReturnValue
from ..utils import update_info_dict
logger = logging.getLogger('itchat')
class AttributeDict(dict):
    """Dict whose CamelCase keys are readable as camelCase attributes."""

    def __getattr__(self, value):
        # 'userName' -> 'UserName': stored keys are capitalized.
        keyName = value[0].upper() + value[1:]
        if keyName in self:
            return self[keyName]
        raise AttributeError("'%s' object has no attribute '%s'" % (
            self.__class__.__name__.split('.')[-1], value))

    def get(self, v, d=None):
        """Like dict.get: return ``d`` when the key is missing."""
        if v in self:
            return self[v]
        return d
class UnInitializedItchat(object):
    """Placeholder core used before a real itchat instance is bound.

    Every attribute access resolves to ``_raise_error``, which only logs
    a warning (and returns None) instead of crashing.
    """
    def _raise_error(self, *args, **kwargs):
        logger.warning('An itchat instance is called before initialized')
    def __getattr__(self, value):
        return self._raise_error
class ContactList(list):
    ''' when a dict is append, init function will be called to format that dict '''
    def __init__(self, *args, **kwargs):
        super(ContactList, self).__init__(*args, **kwargs)
        # Restore the non-picklable defaults (contactInitFn/contactClass).
        self.__setstate__(None)
    @property
    def core(self):
        # Dereference the weakref; fall back to the uninitialized stub.
        return getattr(self, '_core', lambda: fakeItchat)() or fakeItchat
    @core.setter
    def core(self, value):
        # Hold the core weakly to avoid reference cycles.
        self._core = ref(value)
    def set_default_value(self, initFunction=None, contactClass=None):
        # Only accept callables; anything else leaves the default alone.
        if hasattr(initFunction, '__call__'):
            self.contactInitFn = initFunction
        if hasattr(contactClass, '__call__'):
            self.contactClass = contactClass
    def append(self, value):
        # Wrap the raw dict in the configured contact class and give the
        # init hook a chance to replace it.
        contact = self.contactClass(value)
        contact.core = self.core
        if self.contactInitFn is not None:
            contact = self.contactInitFn(self, contact) or contact
        super(ContactList, self).append(contact)
    def __deepcopy__(self, memo):
        r = self.__class__([copy.deepcopy(v) for v in self])
        r.contactInitFn = self.contactInitFn
        r.contactClass = self.contactClass
        r.core = self.core
        return r
    def __getstate__(self):
        # Pickle no real state; __setstate__ restores the defaults.
        return 1
    def __setstate__(self, state):
        self.contactInitFn = None
        self.contactClass = User
    def __str__(self):
        return '[%s]' % ', '.join([repr(v) for v in self])
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__.split('.')[-1],
            self.__str__())
class AbstractUserDict(AttributeDict):
    """Base class for contacts (users, chatrooms, massive platforms).

    Holds a weak reference to the owning itchat core and supplies stub
    implementations (ReturnValue with Ret=-1006) for operations a given
    contact type does not support; subclasses override what they support.
    """
    def __init__(self, *args, **kwargs):
        super(AbstractUserDict, self).__init__(*args, **kwargs)
    @property
    def core(self):
        # Dereference the weakref; fall back to the uninitialized stub.
        return getattr(self, '_core', lambda: fakeItchat)() or fakeItchat
    @core.setter
    def core(self, value):
        self._core = ref(value)
    def update(self):
        # Unsupported on the base type; see subclasses for real behavior.
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not be updated' % \
                self.__class__.__name__, }, })
    def set_alias(self, alias):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not set alias' % \
                self.__class__.__name__, }, })
    def set_pinned(self, isPinned=True):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not be pinned' % \
                self.__class__.__name__, }, })
    def verify(self):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s do not need verify' % \
                self.__class__.__name__, }, })
    def get_head_image(self, imageDir=None):
        return self.core.get_head_img(self.userName, picDir=imageDir)
    def delete_member(self, userName):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not delete member' % \
                self.__class__.__name__, }, })
    def add_member(self, userName):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not add member' % \
                self.__class__.__name__, }, })
    # Messaging helpers all delegate to the bound core with this
    # contact's userName as the target.
    def send_raw_msg(self, msgType, content):
        return self.core.send_raw_msg(msgType, content, self.userName)
    def send_msg(self, msg='Test Message'):
        return self.core.send_msg(msg, self.userName)
    def send_file(self, fileDir, mediaId=None):
        return self.core.send_file(fileDir, self.userName, mediaId)
    def send_image(self, fileDir, mediaId=None):
        return self.core.send_image(fileDir, self.userName, mediaId)
    def send_video(self, fileDir=None, mediaId=None):
        return self.core.send_video(fileDir, self.userName, mediaId)
    def send(self, msg, mediaId=None):
        return self.core.send(msg, self.userName, mediaId)
    def search_member(self, name=None, userName=None, remarkName=None, nickName=None,
            wechatAccount=None):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s do not have members' % \
                self.__class__.__name__, }, })
    def __deepcopy__(self, memo):
        r = self.__class__()
        for k, v in self.items():
            r[copy.deepcopy(k)] = copy.deepcopy(v)
        r.core = self.core
        return r
    def __str__(self):
        return '{%s}' % ', '.join(
            ['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__.split('.')[-1],
            self.__str__())
    def __getstate__(self):
        # Pickle no real state; __setstate__ restores defaults.
        return 1
    def __setstate__(self, state):
        pass
class User(AbstractUserDict):
    """An individual friend contact."""
    def __init__(self, *args, **kwargs):
        super(User, self).__init__(*args, **kwargs)
        self.__setstate__(None)
    def update(self):
        # Refresh this friend's info from the server and merge it in.
        r = self.core.update_friend(self.userName)
        if r:
            update_info_dict(self, r)
        return r
    def set_alias(self, alias):
        return self.core.set_alias(self.userName, alias)
    def set_pinned(self, isPinned=True):
        return self.core.set_pinned(self.userName, isPinned)
    def verify(self):
        # verifyDict is populated when a friend request is received.
        return self.core.add_friend(**self.verifyDict)
    def __deepcopy__(self, memo):
        r = super(User, self).__deepcopy__(memo)
        r.verifyDict = copy.deepcopy(self.verifyDict)
        return r
    def __setstate__(self, state):
        super(User, self).__setstate__(state)
        self.verifyDict = {}
        self['MemberList'] = fakeContactList
class MassivePlatform(AbstractUserDict):
    """An official/subscription account; inherits the stubbed behavior."""
    def __init__(self, *args, **kwargs):
        super(MassivePlatform, self).__init__(*args, **kwargs)
        self.__setstate__(None)
    def __setstate__(self, state):
        super(MassivePlatform, self).__setstate__(state)
        self['MemberList'] = fakeContactList
class Chatroom(AbstractUserDict):
    """A group chat; ``MemberList`` holds ChatroomMember instances."""
    def __init__(self, *args, **kwargs):
        super(Chatroom, self).__init__(*args, **kwargs)
        memberList = ContactList()
        userName = self.get('UserName', '')
        refSelf = ref(self)
        def init_fn(parentList, d):
            # Attach the owning chatroom to every appended member,
            # re-resolving it through the core if our weakref has died.
            d.chatroom = refSelf() or \
                parentList.core.search_chatrooms(userName=userName)
        memberList.set_default_value(init_fn, ChatroomMember)
        # Re-wrap any raw member dicts supplied at construction time.
        if 'MemberList' in self:
            for member in self.memberList:
                memberList.append(member)
        self['MemberList'] = memberList
    @property
    def core(self):
        return getattr(self, '_core', lambda: fakeItchat)() or fakeItchat
    @core.setter
    def core(self, value):
        # Propagate the core to the member list and each member.
        self._core = ref(value)
        self.memberList.core = value
        for member in self.memberList:
            member.core = value
    def update(self, detailedMember=False):
        r = self.core.update_chatroom(self.userName, detailedMember)
        if r:
            update_info_dict(self, r)
            self['MemberList'] = r['MemberList']
        return r
    def set_alias(self, alias):
        # For chatrooms the "alias" is the room name.
        return self.core.set_chatroom_name(self.userName, alias)
    def set_pinned(self, isPinned=True):
        return self.core.set_pinned(self.userName, isPinned)
    def delete_member(self, userName):
        return self.core.delete_member_from_chatroom(self.userName, userName)
    def add_member(self, userName):
        return self.core.add_member_into_chatroom(self.userName, userName)
    def search_member(self, name=None, userName=None, remarkName=None, nickName=None,
            wechatAccount=None):
        # Held under the storage lock so the member list is stable while
        # we scan it. Results are deep copies, never live objects.
        with self.core.storageClass.updateLock:
            if (name or userName or remarkName or nickName or wechatAccount) is None:
                return None
            elif userName: # return the only userName match
                for m in self.memberList:
                    if m.userName == userName:
                        return copy.deepcopy(m)
            else:
                matchDict = {
                    'RemarkName' : remarkName,
                    'NickName' : nickName,
                    'Alias' : wechatAccount, }
                # Drop the criteria the caller did not provide.
                for k in ('RemarkName', 'NickName', 'Alias'):
                    if matchDict[k] is None:
                        del matchDict[k]
                if name: # select based on name
                    contact = []
                    for m in self.memberList:
                        if any([m.get(k) == name for k in ('RemarkName', 'NickName', 'Alias')]):
                            contact.append(m)
                else:
                    contact = self.memberList[:]
                if matchDict: # select again based on matchDict
                    friendList = []
                    for m in contact:
                        if all([m.get(k) == v for k, v in matchDict.items()]):
                            friendList.append(m)
                    return copy.deepcopy(friendList)
                else:
                    return copy.deepcopy(contact)
    def __setstate__(self, state):
        super(Chatroom, self).__setstate__(state)
        if not 'MemberList' in self:
            self['MemberList'] = fakeContactList
class ChatroomMember(AbstractUserDict):
    """A member entry inside a Chatroom's MemberList.

    Members cannot be messaged directly, so every send_* method returns a
    -1006 ReturnValue. The owning chatroom is tracked through a weak
    reference plus its userName as a fallback lookup key.
    """
    def __init__(self, *args, **kwargs):
        # Fixed: previously called super(AbstractUserDict, self), which
        # skipped AbstractUserDict.__init__ in the MRO.
        super(ChatroomMember, self).__init__(*args, **kwargs)
        self.__setstate__(None)
    @property
    def chatroom(self):
        r = getattr(self, '_chatroom', lambda: fakeChatroom)()
        if r is None:
            # Weakref died; look the chatroom up again by its userName
            # and re-cache the reference on success.
            userName = getattr(self, '_chatroomUserName', '')
            r = self.core.search_chatrooms(userName=userName)
            if isinstance(r, dict):
                self.chatroom = r
        return r or fakeChatroom
    @chatroom.setter
    def chatroom(self, value):
        if isinstance(value, dict) and 'UserName' in value:
            self._chatroom = ref(value)
            self._chatroomUserName = value['UserName']
    def get_head_image(self, imageDir=None):
        return self.core.get_head_img(self.userName, self.chatroom.userName, picDir=imageDir)
    def delete_member(self, userName):
        # NOTE: removes *this* member from its chatroom; the userName
        # argument is kept for interface compatibility but is unused.
        return self.core.delete_member_from_chatroom(self.chatroom.userName, self.userName)
    def send_raw_msg(self, msgType, content):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not send message directly' % \
                self.__class__.__name__, }, })
    def send_msg(self, msg='Test Message'):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not send message directly' % \
                self.__class__.__name__, }, })
    def send_file(self, fileDir, mediaId=None):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not send message directly' % \
                self.__class__.__name__, }, })
    def send_image(self, fileDir, mediaId=None):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not send message directly' % \
                self.__class__.__name__, }, })
    def send_video(self, fileDir=None, mediaId=None):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not send message directly' % \
                self.__class__.__name__, }, })
    def send(self, msg, mediaId=None):
        return ReturnValue({'BaseResponse': {
            'Ret': -1006,
            'ErrMsg': '%s can not send message directly' % \
                self.__class__.__name__, }, })
    def __setstate__(self, state):
        super(ChatroomMember, self).__setstate__(state)
        self['MemberList'] = fakeContactList
def wrap_user_dict(d):
    """Pick the right contact class for a raw user dict."""
    userName = d.get('UserName')
    if '@@' in userName:
        # Chatroom userNames contain '@@'.
        return Chatroom(d)
    if d.get('VerifyFlag', 8) & 8 == 0:
        # A cleared 8-bit on VerifyFlag marks a personal account.
        return User(d)
    return MassivePlatform(d)
# Module-level fallback singletons used when no real core/contact is bound.
fakeItchat = UnInitializedItchat()
fakeContactList = ContactList()
fakeChatroom = Chatroom()
| |
# Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
from robot.utils import get_link_path
from selenium.webdriver.remote.webelement import WebElement
from SeleniumLibrary.base import LibraryComponent, keyword
from SeleniumLibrary.utils.path_formatter import _format_path
# Default screenshot file name patterns; {index} is replaced with a
# running number so each capture gets a unique name.
DEFAULT_FILENAME_PAGE = "selenium-screenshot-{index}.png"
DEFAULT_FILENAME_ELEMENT = "selenium-element-screenshot-{index}.png"
# Magic value: embed the image into log.html instead of writing a file.
EMBED = "EMBED"
class ScreenshotKeywords(LibraryComponent):
    """Keywords for capturing page/element screenshots.

    Screenshots are either written to disk under the configured screenshot
    directory, or embedded into log.html as Base64 when EMBED is requested.
    """
    @keyword
    def set_screenshot_directory(self, path: Union[None, str]) -> str:
        """Sets the directory for captured screenshots.
        ``path`` argument specifies the absolute path to a directory where
        the screenshots should be written to. If the directory does not
        exist, it will be created. The directory can also be set when
        `importing` the library. If it is not configured anywhere,
        screenshots are saved to the same directory where Robot Framework's
        log file is written.
        If ``path`` equals to EMBED (case insensitive) and
        `Capture Page Screenshot` or `capture Element Screenshot` keywords
        filename argument is not changed from the default value, then
        the page or element screenshot is embedded as Base64 image to
        the log.html.
        The previous value is returned and can be used to restore
        the original value later if needed.
        Returning the previous value is new in SeleniumLibrary 3.0.
        The persist argument was removed in SeleniumLibrary 3.2 and
        EMBED is new in SeleniumLibrary 4.2.
        """
        if path is None:
            # None means "use Robot Framework's log directory" downstream
            # (see _get_screenshot_path); branch kept explicit.
            path = None
        elif path.upper() == EMBED:
            # Normalise any casing of "embed" to the canonical sentinel.
            path = EMBED
        else:
            path = os.path.abspath(path)
            # NOTE(review): _create_directory creates the *parent* of the
            # given path; the directory itself is created lazily when the
            # first screenshot is written -- confirm this is intended.
            self._create_directory(path)
        previous = self._screenshot_root_directory
        self._screenshot_root_directory = path
        return previous
    @keyword
    def capture_page_screenshot(self, filename: str = DEFAULT_FILENAME_PAGE) -> str:
        """Takes a screenshot of the current page and embeds it into a log file.
        ``filename`` argument specifies the name of the file to write the
        screenshot into. The directory where screenshots are saved can be
        set when `importing` the library or by using the `Set Screenshot
        Directory` keyword. If the directory is not configured, screenshots
        are saved to the same directory where Robot Framework's log file is
        written.
        If ``filename`` equals to EMBED (case insensitive), then screenshot
        is embedded as Base64 image to the log.html. In this case file is not
        created in the filesystem.
        Starting from SeleniumLibrary 1.8, if ``filename`` contains marker
        ``{index}``, it will be automatically replaced with an unique running
        index, preventing files to be overwritten. Indices start from 1,
        and how they are represented can be customized using Python's
        [https://docs.python.org/3/library/string.html#format-string-syntax|
        format string syntax].
        An absolute path to the created screenshot file is returned or if
        ``filename`` equals to EMBED, word `EMBED` is returned.
        Support for EMBED is new in SeleniumLibrary 4.2
        Examples:
        | `Capture Page Screenshot` |                                        |
        | `File Should Exist`       | ${OUTPUTDIR}/selenium-screenshot-1.png |
        | ${path} =                 | `Capture Page Screenshot`              |
        | `File Should Exist`       | ${OUTPUTDIR}/selenium-screenshot-2.png |
        | `File Should Exist`       | ${path}                                |
        | `Capture Page Screenshot` | custom_name.png                        |
        | `File Should Exist`       | ${OUTPUTDIR}/custom_name.png           |
        | `Capture Page Screenshot` | custom_with_index_{index}.png          |
        | `File Should Exist`       | ${OUTPUTDIR}/custom_with_index_1.png   |
        | `Capture Page Screenshot` | formatted_index_{index:03}.png         |
        | `File Should Exist`       | ${OUTPUTDIR}/formatted_index_001.png   |
        | `Capture Page Screenshot` | EMBED                                  |
        | `File Should Not Exist`   | EMBED                                  |
        """
        if not self.drivers.current:
            # No open browser: log and return None instead of raising.
            self.info("Cannot capture screenshot because no browser is open.")
            return
        if self._decide_embedded(filename):
            return self._capture_page_screen_to_log()
        return self._capture_page_screenshot_to_file(filename)
    def _capture_page_screenshot_to_file(self, filename):
        # Save the page screenshot to disk and link it (800px wide) into
        # log.html; returns the absolute file path.
        path = self._get_screenshot_path(filename)
        self._create_directory(path)
        if not self.driver.save_screenshot(path):
            raise RuntimeError(f"Failed to save screenshot '{path}'.")
        self._embed_to_log_as_file(path, 800)
        return path
    def _capture_page_screen_to_log(self):
        # Embed the page screenshot as Base64 in log.html; no file is written.
        screenshot_as_base64 = self.driver.get_screenshot_as_base64()
        self._embed_to_log_as_base64(screenshot_as_base64, 800)
        return EMBED
    @keyword
    def capture_element_screenshot(
        self,
        locator: Union[WebElement, None, str],
        filename: str = DEFAULT_FILENAME_ELEMENT,
    ) -> str:
        """Captures a screenshot from the element identified by ``locator`` and embeds it into log file.
        See `Capture Page Screenshot` for details about ``filename`` argument.
        See the `Locating elements` section for details about the locator
        syntax.
        An absolute path to the created element screenshot is returned.
        Support for capturing the screenshot from an element has limited support
        among browser vendors. Please check the browser vendor driver documentation
        does the browser support capturing a screenshot from an element.
        New in SeleniumLibrary 3.3. Support for EMBED is new in SeleniumLibrary 4.2.
        Examples:
        | `Capture Element Screenshot` | id:image_id |                                |
        | `Capture Element Screenshot` | id:image_id | ${OUTPUTDIR}/id_image_id-1.png |
        | `Capture Element Screenshot` | id:image_id | EMBED                          |
        """
        if not self.drivers.current:
            # No open browser: log and return None instead of raising.
            self.info(
                "Cannot capture screenshot from element because no browser is open."
            )
            return
        element = self.find_element(locator, required=True)
        if self._decide_embedded(filename):
            return self._capture_element_screen_to_log(element)
        return self._capture_element_screenshot_to_file(element, filename)
    def _capture_element_screenshot_to_file(self, element, filename):
        # Save the element screenshot to disk and link it (400px wide) into
        # log.html; returns the absolute file path.
        path = self._get_screenshot_path(filename)
        self._create_directory(path)
        if not element.screenshot(path):
            raise RuntimeError(f"Failed to save element screenshot '{path}'.")
        self._embed_to_log_as_file(path, 400)
        return path
    def _capture_element_screen_to_log(self, element):
        # Embed the element screenshot as Base64 in log.html; no file written.
        self._embed_to_log_as_base64(element.screenshot_as_base64, 400)
        return EMBED
    @property
    def _screenshot_root_directory(self):
        # The configured directory lives on the shared library context so all
        # components see the same value.
        return self.ctx.screenshot_root_directory
    @_screenshot_root_directory.setter
    def _screenshot_root_directory(self, value):
        self.ctx.screenshot_root_directory = value
    def _decide_embedded(self, filename):
        # True when the screenshot must be embedded in log.html: either the
        # filename is literally "embed", or the filename is the (unchanged)
        # default and the root directory was set to EMBED.
        filename = filename.lower()
        if (
            filename == DEFAULT_FILENAME_PAGE
            and self._screenshot_root_directory == EMBED
        ):
            return True
        if (
            filename == DEFAULT_FILENAME_ELEMENT
            and self._screenshot_root_directory == EMBED
        ):
            return True
        if filename == EMBED.lower():
            return True
        return False
    def _get_screenshot_path(self, filename):
        # Resolve the target directory (configured dir, unless it is the
        # EMBED sentinel, in which case fall back to the log directory).
        if self._screenshot_root_directory != EMBED:
            directory = self._screenshot_root_directory or self.log_dir
        else:
            directory = self.log_dir
        filename = filename.replace("/", os.sep)
        index = 0
        while True:
            index += 1
            formatted = _format_path(filename, index)
            path = os.path.join(directory, formatted)
            # filename didn't contain {index} or unique path was found
            if formatted == filename or not os.path.exists(path):
                return path
    def _create_directory(self, path):
        # Creates the parent directory of ``path`` (``path`` itself is
        # expected to be a file path when called from the capture helpers).
        target_dir = os.path.dirname(path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
    def _embed_to_log_as_base64(self, screenshot_as_base64, width):
        # base64 image is shown as on its own row and thus previous row is closed on
        # purpose. Depending on Robot's log structure is a bit risky.
        self.info(
            '</td></tr><tr><td colspan="3">'
            '<img alt="screenshot" class="robot-seleniumlibrary-screenshot" '
            f'src="data:image/png;base64,{screenshot_as_base64}" width="{width}px">',
            html=True,
        )
    def _embed_to_log_as_file(self, path, width):
        # Image is shown on its own row and thus previous row is closed on
        # purpose. Depending on Robot's log structure is a bit risky.
        src = get_link_path(path, self.log_dir)
        self.info(
            '</td></tr><tr><td colspan="3">'
            f'<a href="{src}"><img src="{src}" width="{width}px"></a>',
            html=True,
        )
| |
#!/usr/bin/python
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import sys
import array
import random
from deap import base
from deap import creator
from deap import tools
import fgeneric
import numpy as np
from operator import attrgetter
import bbobbenchmarks as bn
toolbox = base.Toolbox()
# Single-objective minimisation (weight -1.0).
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
# Individuals are flat arrays of doubles carrying a FitnessMin.
creator.create("Individual", array.array, typecode="d",
               fitness=creator.FitnessMin)
# pool = multiprocessing.Pool()
# toolbox.register("map", futures.map)
def tupleize(func):
    """Decorator that wraps *func* so its result is returned as a 1-tuple.

    DEAP expects fitness values to be sequences; this adapts evaluation
    functions that return a single scalar.
    """
    # Local import keeps this fix self-contained (no top-of-file change).
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kargs):
        return func(*args, **kargs),
    return wrapper
def main(func,
         NGEN,
         CXPB,
         MUTPB,
         dim,
         ftarget,
         tournsize,
         n_aval,
         ):
    """Run an elitist generational GA on ``func`` and return the logbook.

    func      -- evaluation function (single scalar, minimised; tupleized)
    NGEN      -- number of generations
    CXPB      -- crossover probability
    MUTPB     -- mutation probability
    dim       -- individual length (problem dimensionality)
    ftarget   -- target fitness (accepted for caller compatibility; unused)
    tournsize -- tournament size for selection
    n_aval    -- total evaluation budget used to size the population
    """
    toolbox.register("select", tools.selTournament, tournsize=tournsize)
    toolbox.register(
        "mutate",
        tools.mutGaussian,
        mu=0,
        sigma=1,
        indpb=0.1
    )
    # mutShuffleIndexes
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    # Population size derived from the evaluation budget: y individuals per
    # generation, plus the remainder x that does not divide evenly.
    y = int(n_aval / NGEN)
    x = n_aval - y * NGEN
    n = x + y
    toolbox.register("evaluate", func)
    toolbox.decorate("evaluate", tupleize)
    # Genes are uniform floats in [-4, 4].  (The original also registered
    # attr_float as random.random first; that registration was dead code,
    # overwritten here before any individual was built.)
    toolbox.register("attr_float", random.uniform, -4, 4)
    toolbox.register("mate", tools.cxUniform)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_float, dim)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    logbook = tools.Logbook()
    logbook.header = "gen", "min", "avg", "max", "std"
    pop = toolbox.population(n)
    # Seed the first individual from a stored initial population, if present.
    # NOTE(review): relies on the module-level global ``f_name`` -- confirm it
    # is always defined before main() is called.
    filename = ("../init_pops/init_pop_f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_2.txt")
    if np.DataSource().exists(filename):
        with open(filename, 'r') as f:
            # NOTE(review): eval() of file contents -- trusted input only.
            a = eval(f.readline())
        for index in range(len(pop[0])):
            pop[0][index] = a[index]
    # Evaluate the entire population.
    fitnesses = list(toolbox.map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    for g in range(NGEN):
        # Select the next generation individuals.
        offspring = toolbox.select(pop, len(pop))
        # BUG FIX: clone the *selected* individuals.  The original cloned
        # ``pop`` here, silently discarding the tournament selection above
        # and removing all selection pressure.
        offspring = list(toolbox.map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring.
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2, 0.1)
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Re-evaluate only individuals whose fitness was invalidated above.
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # Elitism: keep the best of the old population by replacing the worst
        # offspring, then shuffle so the elite's position is random.
        best_pop = tools.selBest(pop, 1)[0]
        offspring = sorted(offspring, key=attrgetter("fitness"))
        offspring[0] = best_pop
        random.shuffle(offspring)
        pop[:] = offspring
        record = stats.compile(pop)
        logbook.record(gen=g, **record)
        # Restart on stagnation: if the population has converged (tiny std),
        # re-seed a fresh population keeping the current best individual.
        if record["std"] < 10e-12:
            best_pop = tools.selBest(pop, 1)[0]
            pop = toolbox.population(n)
            pop = sorted(pop, key=attrgetter("fitness"))
            pop[0] = best_pop
            fitnesses = list(toolbox.map(toolbox.evaluate, pop))
            for ind, fit in zip(pop, fitnesses):
                ind.fitness.values = fit
            # NOTE(review): this does not affect the for-loop counter; it only
            # shifts the ``gen`` label of the extra record below.
            g += 1
            record = stats.compile(pop)
            logbook.record(gen=g, **record)
    return logbook
if __name__ == "__main__":
    # Parse command-line arguments.
    # NOTE(review): ``gaParams``/``tournsize`` remain undefined if the flags
    # are missing, causing a NameError below -- confirm callers pass both.
    for i in range(len(sys.argv) - 1):
        if (sys.argv[i] == '-params'):
            gaParams = sys.argv[i + 1]
        elif (sys.argv[i] == '-tournsize'):
            tournsize = int(sys.argv[i + 1])
    # Read the GA parameter file: whitespace-separated columns matching
    # ``keys``; lines starting with '#' are comments.
    f = open(gaParams, "r")
    keys = ['key', 'NGEN', 'n_aval', 'CXPB', 'MUTPB', 'dim', 'seed', 'tournsize']
    params = dict()
    for line in f:
        if line[0] == '#':
            continue
        tokens = line.split()
        for key, value in zip(keys, tokens):
            if key == 'key':
                params[key] = value
            elif key == 'CXPB' or key == 'MUTPB':
                params[key] = float(value)
            else:
                params[key] = int(value)
    f.close()
    # Maximum number of restart for an algorithm that detects stagnation
    # Create a COCO experiment that will log the results under the
    # ./output directory
    e = fgeneric.LoggingFunction('output')
    # Iterate over all desired test dimensions
    # for dim in (2, 3, 5, 10, 20, 40):
    dim = params['dim']
    # Set the maximum number function evaluation granted to the algorithm
    # This is usually function of the dimensionality of the problem
    # Iterate over a set of benchmarks (noise free benchmarks here)
    # for f_name in bn.nfreeIDs:
    f_name = 23
    # Iterate over all the instance of a single problem
    # Rotation, translation, etc.
    # for instance in chain(range(1, 6), range(21, 31)):
    instance = 1
    # Set the function to be used (problem) in the logger
    # NOTE(review): hard-codes iinstance=1 instead of using ``instance`` --
    # confirm this is intentional.
    e.setfun(*bn.instantiate(f_name, iinstance=1))
    # Independent restarts until maxfunevals or ftarget is reached
    # Run the algorithm with the remaining
    # number of evaluations
    random.seed(params['seed'])
    logbook = main(e.evalfun,
                   NGEN=params['NGEN'],
                   CXPB=params['CXPB'],
                   MUTPB=params['MUTPB'],
                   dim=dim,
                   n_aval=params['n_aval'],
                   tournsize=tournsize,
                   ftarget=e.ftarget)
    # Append the logbook for this run to a per-function/dim/tournsize file.
    filename = ("../pseudo-adaptative/f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_" +
                str(tournsize) +
                ".txt")
    with open(filename, "a") as myfile:
        myfile.write(str(logbook))
        myfile.write(str('\n'))
        # NOTE(review): close() inside a ``with`` block is redundant.
        myfile.close()
| |
'''
Created on Dec 18, 2012
@author: Gary
'''
from housemonitor.inputs.processinput import ProcessInput
from housemonitor.inputs.processinput import ProcessXBeeInput
from housemonitor.inputs.processinput import ProcessStatusRequests
from housemonitor.inputs.processinput import ProcessCommandInput
from housemonitor.inputs.dataenvelope import DataEnvelope
from housemonitor.lib.hmqueue import HMQueue
from housemonitor.configuration.xmlDeviceConfiguration import xmlDeviceConfiguration
from housemonitor.configuration.xmlDeviceConfiguration import InvalidDeviceError
from housemonitor.configuration.xmlDeviceConfiguration import InvalidPortError
from housemonitor.configuration.xmlDeviceConfiguration import InvalidConfigurationOptionError
import unittest
import datetime
from housemonitor.lib.common import Common
import logging.config
from housemonitor.lib.constants import Constants
import pprint
from mock import Mock, MagicMock, patch
from housemonitor.lib.getdatetime import GetDateTime
class Test( unittest.TestCase ):
    """Unit tests for ProcessInput and the per-envelope-type processors.

    Fixes over the previous revision:
    - ``assert_called_oncy_with`` was a typo; on a Mock an unknown assert
      name is a silent no-op, so those assertions never actually ran.
      Replaced with ``assert_called_once_with``.
    - In three tests the mock parameters were named in the wrong order:
      ``@patch`` decorators inject mocks bottom-up (innermost decorator
      supplies the first argument).  The parameters are renamed to match.
    """
    logger = logging.getLogger( 'UnitTest' )
    # Known-good two-device configuration used by the happy-path test.
    valid_devices_configuration = {'0x13a200408cccc3': {'adc-0': {'cosm_channel': '3',
                                                                  'description': 'The temperature in the sunroom',
                                                                  'name': 'Indoor Temperature',
                                                                  'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                                            'step.TMP_36_Volts_to_Centigrade',
                                                                            'step.Centigrade_to_Fahrenheit',
                                                                            'step.Average',
                                                                            'step.FormatValue',
                                                                            'step.CurrentValue'],
                                                                  'units': 'F'},
                                                        'adc-1': {'cosm_channel': '3',
                                                                  'description': 'The temperature at 100 West Lisa Drive Austin TX',
                                                                  'name': 'Outdoor Temperature',
                                                                  'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                                            'step.TMP_36_Volts_to_Centigrade',
                                                                            'step.Centigrade_to_Fahrenheit',
                                                                            'step.Average',
                                                                            'step.FormatValue',
                                                                            'step.CurrentValue'],
                                                                  'units': 'F'},
                                                        'name': 'Sunroom',
                                                        'network_address': '0xf9f2'},
                                   '0x13a200409029bf': {'adc-1': {'cosm_channel': '2',
                                                                  'description': 'The temperature above the garage door',
                                                                  'name': 'Garage Temperature',
                                                                  'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                                            'step.TMP_36_Volts_to_Centigrade',
                                                                            'step.Centigrade_to_Fahrenheit',
                                                                            'step.Average',
                                                                            'step.FormatValue',
                                                                            'step.CurrentValue',
                                                                            'step.oneInN',
                                                                            'outputs.COSM'],
                                                                  'units': 'F'},
                                                        'dio-0': {'cosm_channel': '1',
                                                                  'description': 'Monitors whether the garage door is open or closed.',
                                                                  'name': 'Garage Door Monitor',
                                                                  'network_address': '0xf9f2',
                                                                  'steps': ['step.garage_door_state',
                                                                            'step.CurrentValue',
                                                                            'step.onBooleanChange',
                                                                            'outputs.COSM']},
                                                        'name': 'Garage Door XBee Monitor',
                                                        'network_address': '0xf9f2'}}
    def setUp( self ):
        logging.config.fileConfig( "unittest_logging.conf" )
    def tearDown( self ):
        pass
    # ProcessXBeeInput
    def test_ProcessXBeeInput_logger_name( self ):
        devices = {'device': {'port': {}}}
        pxi = ProcessXBeeInput( devices )
        self.assertEqual( pxi.logger_name, Constants.LogKeys.inputsZigBee )
    @patch( 'housemonitor.inputs.processinput.Common.send' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def test_XBeeInput_process_invalid_device_error( self, config, send ):
        """A packet from a device missing in the config logs an exception."""
        data = {'source_addr_long': '\x00\x13\xa2\x00@\x90)\xbf',
                'source_addr': '\xf9\xf2',
                'id': 'rx_io_data_long_addr',
                'samples': [{'adc-1': 622}],
                'options': '\x01'}
        env = DataEnvelope( Constants.EnvelopeTypes.XBEE, **data )
        xd = xmlDeviceConfiguration()
        # Configuration deliberately lacks device 0x13a200409029bf.
        xd.devices = {'0x13a200408cccc3': {'adc-0': {'cosm_channel': '3',
                                                     'description': 'The temperature in the sunroom',
                                                     'name': 'Indoor Temperature',
                                                     'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                               'step.TMP_36_Volts_to_Centigrade',
                                                               'step.Centigrade_to_Fahrenheit',
                                                               'step.Average',
                                                               'step.FormatValue',
                                                               'step.CurrentValue'],
                                                     'units': 'F'},
                                           'adc-1': {'cosm_channel': '3',
                                                     'description': 'The temperature at 100 West Lisa Drive Austin TX',
                                                     'name': 'Outdoor Temperature',
                                                     'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                               'step.TMP_36_Volts_to_Centigrade',
                                                               'step.Centigrade_to_Fahrenheit',
                                                               'step.Average',
                                                               'step.FormatValue',
                                                               'step.CurrentValue'],
                                                     'units': 'F'},
                                           'name': 'Sunroom',
                                           'network_address': '0xf9f2'}}
        xp = ProcessXBeeInput( xd )
        xp.logger.exception = MagicMock()
        value = xp.process( env )
        xp.logger.exception.assert_called_with( "'Invalid device (0x13a200409029bf)'" )
        self.assertEqual( send.call_count, 0 )
    @patch( 'housemonitor.inputs.processinput.Common.send' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def test_XBeeInput_process_invalid_port_error( self, config, send ):
        """A sample on an unconfigured port logs an exception."""
        data = {'source_addr_long': '\x00\x13\xa2\x00@\x90)\xbf',
                'source_addr': '\xf9\xf2',
                'id': 'rx_io_data_long_addr',
                'samples': [{'adc-3': 622}],
                'options': '\x01'}
        env = DataEnvelope( Constants.EnvelopeTypes.XBEE, **data )
        xd = xmlDeviceConfiguration()
        # Device exists but has no 'adc-3' port.
        xd.devices = {'0x13a200409029bf': {'adc-0': {'cosm_channel': '3',
                                                     'description': 'The temperature in the sunroom',
                                                     'name': 'Indoor Temperature',
                                                     'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                               'step.TMP_36_Volts_to_Centigrade',
                                                               'step.Centigrade_to_Fahrenheit',
                                                               'step.Average',
                                                               'step.FormatValue',
                                                               'step.CurrentValue'],
                                                     'units': 'F'},
                                           'adc-1': {'cosm_channel': '3',
                                                     'description': 'The temperature at 100 West Lisa Drive Austin TX',
                                                     'name': 'Outdoor Temperature',
                                                     'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                               'step.TMP_36_Volts_to_Centigrade',
                                                               'step.Centigrade_to_Fahrenheit',
                                                               'step.Average',
                                                               'step.FormatValue',
                                                               'step.CurrentValue'],
                                                     'units': 'F'},
                                           'name': 'Sunroom',
                                           'network_address': '0xf9f2'}}
        xp = ProcessXBeeInput( xd )
        xp.logger.exception = MagicMock()
        value = xp.process( env )
        xp.logger.exception.assert_called_with( "'Invalid port (adc-3)'" )
        self.assertEqual( send.call_count, 0 )
    @patch( 'housemonitor.inputs.processinput.Common.send' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def test_XBeeInput_process_invalid_configuration_options_error( self, config, send ):
        """A port missing the required 'units' option logs an exception."""
        data = {'source_addr_long': '\x00\x13\xa2\x00@\x90)\xbf',
                'source_addr': '\xf9\xf2',
                'id': 'rx_io_data_long_addr',
                'samples': [{'adc-0': 622}],
                'options': '\x01'}
        env = DataEnvelope( Constants.EnvelopeTypes.XBEE, **data )
        xd = xmlDeviceConfiguration()
        # 'adc-0' deliberately lacks the 'units' option.
        xd.devices = {'0x13a200409029bf': {'adc-0': {'cosm_channel': '3',
                                                     'description': 'The temperature in the sunroom',
                                                     'name': 'Indoor Temperature',
                                                     'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                               'step.TMP_36_Volts_to_Centigrade',
                                                               'step.Centigrade_to_Fahrenheit',
                                                               'step.Average',
                                                               'step.FormatValue',
                                                               'step.CurrentValue']},
                                           'adc-1': {'cosm_channel': '3',
                                                     'description': 'The temperature at 100 West Lisa Drive Austin TX',
                                                     'name': 'Outdoor Temperature',
                                                     'steps': ['step.ZigbeeAnalogNumberToVolts',
                                                               'step.TMP_36_Volts_to_Centigrade',
                                                               'step.Centigrade_to_Fahrenheit',
                                                               'step.Average',
                                                               'step.FormatValue',
                                                               'step.CurrentValue'],
                                                     'units': 'F'},
                                           'name': 'Sunroom',
                                           'network_address': '0xf9f2'}}
        xp = ProcessXBeeInput( xd )
        xp.logger.exception = MagicMock()
        value = xp.process( env )
        xp.logger.exception.assert_called_with( "'Required configuration option not present (units) for device(0x13a200409029bf) port (adc-0)'" )
        self.assertEqual( send.call_count, 0 )
    @patch( 'housemonitor.inputs.processinput.datetime' )
    @patch( 'housemonitor.inputs.processinput.Common.send' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def test_XBeeInput_process_with_valid_data( self, config, send, dt ):
        """A valid packet is forwarded to Common.send with the port's steps."""
        test_time = datetime.datetime( 2012, 1, 2, 3, 4, 5 )
        data = {'source_addr_long': '\x00\x13\xa2\x00@\x90)\xbf',
                'source_addr': '\xf9\xf2',
                'id': 'rx_io_data_long_addr',
                'samples': [{'adc-1': 622}],
                'options': '\x01'}
        env = DataEnvelope( Constants.EnvelopeTypes.XBEE, **data )
        xd = xmlDeviceConfiguration()
        xd.devices = self.valid_devices_configuration
        xp = ProcessXBeeInput( xd )
        dt.utcnow.return_value = 123
        xp.process( env )
        send.assert_called_once_with( 622, {'name': 'Garage Temperature', 'units': 'F', 'steps': ['step.ZigbeeAnalogNumberToVolts', 'step.TMP_36_Volts_to_Centigrade', 'step.Centigrade_to_Fahrenheit', 'step.Average', 'step.FormatValue', 'step.CurrentValue', 'step.oneInN', 'outputs.COSM'], 'at': 123, 'device': '0x13a200409029bf', 'port': 'adc-1'}, ['step.ZigbeeAnalogNumberToVolts', 'step.TMP_36_Volts_to_Centigrade', 'step.Centigrade_to_Fahrenheit', 'step.Average', 'step.FormatValue', 'step.CurrentValue', 'step.oneInN', 'outputs.COSM'] )
    # ProcessCommandInput
    def test_ProcessCommandInputs_logger_name( self ):
        devices = {'device': {'port': {}}}
        psr = ProcessCommandInput( devices )
        self.assertEqual( psr.logger_name, Constants.LogKeys.INPUT_COMMANDS )
    @patch( 'housemonitor.inputs.processinput.Common.send' )
    def test_ProcessCommandInputs_process( self, send ):
        devices = {'device': {'port': {}}}
        env = DataEnvelope( Constants.EnvelopeTypes.STATUS, steps=['a', 'b', 'c'], value=555 )
        psr = ProcessCommandInput( devices )
        psr.process( env )
        send.assert_called_once_with( 555, env.args, ['a', 'b', 'c'] )
    # ProcessStatusRequests
    def test_ProcessStatusRequests_logger_name( self ):
        devices = {'device': {'port': {}}}
        psr = ProcessStatusRequests( devices )
        self.assertEqual( psr.logger_name, Constants.LogKeys.INPUT_STATUS )
    @patch( 'housemonitor.inputs.processinput.Common.send' )
    def test_ProcessStatusRequests_process( self, send ):
        devices = {'device': {'port': {}}}
        data = {Constants.EnvelopeContents.STEPS: ['a', 'b', 'c'], Constants.EnvelopeContents.VALUE: 555}
        env = DataEnvelope( Constants.EnvelopeTypes.STATUS, **data )
        psr = ProcessStatusRequests( devices )
        psr.process( env )
        send.assert_called_once_with( 555, env.args, env[Constants.EnvelopeContents.STEPS] )
    # ProcessInput
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def testProcessInput_topic_name( self, config ):
        devices = {'device': {'port': {}}}
        pi = ProcessInput( devices )
        self.assertEqual( pi.topic_name, Constants.TopicNames.ProcessInputs )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def testProcessInput_configuration_file_name( self, config ):
        devices = {'device': {'port': {}}}
        pi = ProcessInput( devices )
        self.assertEqual( pi.configuration_file_name, 'housemonitor.inputs.processinput' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def test_ProcessInput_logger_name( self, config ):
        devices = {'device': {'port': {}}}
        pi = ProcessInput( devices )
        self.assertEqual( pi.logger_name, Constants.LogKeys.inputs )
    @patch.object( ProcessXBeeInput, 'process' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    @patch.object( HMQueue, 'receive' )
    def test_ProcessInput_work_xbee_input( self, receive, config, process ):
        """work() routes an XBEE envelope to the ProcessXBeeInput processor."""
        # Parameters renamed: mocks are injected bottom-up, so 'receive' is
        # first; the previous (process, config, receive) order was wrong.
        envelope = DataEnvelope( Constants.EnvelopeTypes.XBEE )
        que = HMQueue()
        pi = ProcessInput( que )
        que.receive.return_value = envelope
        pi.work()
        # BUG FIX: was "assert_called_oncy_with", a no-op on a Mock.
        que.receive.assert_called_once_with()
        pi.commands[envelope.type].process.assert_called_once_with( envelope )
    @patch.object( ProcessStatusRequests, 'process' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    @patch.object( HMQueue, 'receive' )
    def test_ProcessInput_work_status_request( self, receive, config, process ):
        """work() routes a STATUS envelope to the ProcessStatusRequests processor."""
        envelope = DataEnvelope( Constants.EnvelopeTypes.STATUS )
        que = HMQueue()
        pi = ProcessInput( que )
        que.receive.return_value = envelope
        pi.work()
        # BUG FIX: was "assert_called_oncy_with", a no-op on a Mock.
        que.receive.assert_called_once_with()
        pi.commands[envelope.type].process.assert_called_once_with( envelope )
    def side_effect( self ):
        # Used as a mock side effect to break out of ProcessInput.input().
        self.pi.forever = False
    @patch.object( HMQueue, 'receive' )
    @patch.object( ProcessInput, 'work' )
    @patch( 'housemonitor.inputs.processinput.xmlDeviceConfiguration.configure' )
    def test_input( self, config, work, receive ):
        """input() loops calling work() until the side effect clears ``forever``."""
        # Parameters renamed to the bottom-up injection order; ``work`` is
        # still the ProcessInput.work mock, as the body requires.
        que = HMQueue()
        self.pi = ProcessInput( que )
        work.side_effect = self.side_effect
        self.pi.input()
        self.pi.work.assert_called_once_with()
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Run every test in this module when executed directly.
    unittest.main() # pragma: no cover
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# aiohttp documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 5 12:35:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import codecs
import re
# Extract the package version from aiohttp/__init__.py without importing it.
_docs_path = os.path.dirname(__file__)
_version_path = os.path.abspath(os.path.join(_docs_path,
                                             '..', 'aiohttp', '__init__.py'))
with codecs.open(_version_path, 'r', 'latin1') as fp:
    try:
        _version_info = re.search(r"^__version__ = '"
                                  r"(?P<major>\d+)"
                                  r"\.(?P<minor>\d+)"
                                  r"\.(?P<patch>\d+)"
                                  r"(?P<tag>.*)?'$",
                                  fp.read(), re.M).groupdict()
    # BUG FIX: re.search() returns None when the pattern is absent, so
    # .groupdict() raises AttributeError -- the old "except IndexError"
    # could never catch it.  Catch both and re-raise a clear error.
    except (AttributeError, IndexError):
        raise RuntimeError('Unable to determine version.')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
import alabaster
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.intersphinx',
    'alabaster',
    'sphinxcontrib.asyncio',
    'sphinxcontrib.newsfeed',
]
# Spell checking is optional: enabled only when sphinxcontrib-spelling
# is installed (best-effort, so ImportError is deliberately ignored).
try:
    import sphinxcontrib.spelling  # noqa
    extensions.append('sphinxcontrib.spelling')
except ImportError:
    pass
# Targets for cross-project references (:mod:, :class:, ...) in the docs.
intersphinx_mapping = {
    'python': ('http://docs.python.org/3', None),
    'multidict':
    ('http://multidict.readthedocs.org/en/stable/', None),
    'aiohttpjinja2':
    ('http://aiohttp-jinja2.readthedocs.org/en/stable/', None),
    'aiohttpsession':
    ('http://aiohttp-session.readthedocs.org/en/stable/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'aiohttp'
# NOTE: "copyright" intentionally shadows the builtin; Sphinx reads this name.
copyright = '2013-2016, KeepSafe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{major}.{minor}'.format(**_version_info)
# The full version, including alpha/beta/rc tags.
# BUG FIX: for untagged releases the regex's ``tag`` group is empty, and the
# original template produced a trailing dash (e.g. '1.2.3-'); strip it so
# stable releases render cleanly while tagged ones keep 'X.Y.Z-tag'.
release = '{major}.{minor}.{patch}-{tag}'.format(**_version_info).rstrip('-')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The default language to highlight source code in.
highlight_language = 'python3'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'logo': 'aiohttp-icon-128x128.png',
    'description': 'http client/server for asyncio',
    'github_user': 'KeepSafe',
    'github_repo': 'aiohttp',
    'github_button': True,
    'github_banner': True,
    'travis_button': True,
    'pre_bg': '#FFF6E5',
    'note_bg': '#E5ECD1',
    'note_border': '#BFCF8C',
    'body_text': '#482C0A',
    'sidebar_text': '#49443E',
    'sidebar_header': '#4B4032',
}
# Add any paths that contain custom themes here, relative to this directory.
# alabaster ships its theme files inside the package; get_path() locates them.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'aiohttp-icon.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'aiohttp-icon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': [
        'about.html', 'navigation.html', 'searchbox.html',
    ]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiohttpdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'aiohttp.tex', 'aiohttp Documentation',
     'KeepSafe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'aiohttp', 'aiohttp Documentation',
     ['KeepSafe'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'aiohttp', 'aiohttp Documentation',
     'KeepSafe', 'aiohttp', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Site shortname for the Disqus comment widget (sphinxcontrib-newsfeed).
disqus_shortname = 'aiohttp'
| |
# -*- coding: utf-8 -*-
"""
    Human Resource Management controller

    web2py executes this file per request; ``request``, ``settings``,
    ``s3db`` etc. are injected globals from the framework environment.
"""
# Shortcuts to the routed controller/function names for this request
module = request.controller
resourcename = request.function
# Refuse to serve the controller when the HRM module is deactivated
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# Populate session/request variables used throughout the HRM controllers
s3db.hrm_vars()
# =============================================================================
def index():
    """ Module Home Page: route the user to the right start view. """
    if session.s3.hrm.mode is not None:
        # Personal-profile mode: jump straight to the user's own record
        redirect(URL(f="person"))
    # Normal mode: skip the home page and open the searchable Staff list
    redirect(URL(f="staff", args="summary"))
# =============================================================================
# People
# =============================================================================
def human_resource():
    """
        HR Controller
        - combined Staff/Volunteers
        Used for Summary view, Imports and S3AddPersonWidget2
    """
    controller = s3db.hrm_human_resource_controller
    return controller()
# -----------------------------------------------------------------------------
def staff():
    """ Staff Controller """
    # Staff only
    s3.filter = FS("type") == 1
    def prep(r):
        # Pre-processor: runs before the REST method is executed
        table = r.table
        tablename = r.tablename
        get_vars = r.get_vars
        # Use CRUD strings for staff
        crud_strings = s3.crud_strings
        crud_strings[tablename] = crud_strings["hrm_staff"]
        resource = r.resource
        if "expiring" in get_vars:
            # Filter for staff with contracts expiring in the next 4 weeks
            query = FS("end_date") < \
                    (request.utcnow + datetime.timedelta(weeks=4))
            resource.add_filter(query)
            # Adapt CRUD strings
            crud_strings[tablename].title_list = \
                T("Staff with Contracts Expiring in the next Month")
            # Reconfigure
            resource.configure(# Sort by Expiry
                               sortby = table.end_date,
                               # Remove the Add button
                               insertable=False
                               )
            # Adapt list_fields
            list_fields = [(T("Contract End Date"), "end_date"),
                           "person_id",
                           "job_title_id",
                           "organisation_id",
                           "department_id",
                           "site_id",
                           #"site_contact",
                           ]
        else:
            # Adapt list_fields
            list_fields = ["person_id",
                           "job_title_id",
                           "organisation_id",
                           "department_id",
                           "site_id",
                           #"site_contact",
                           (T("Email"), "email.value"),
                           (settings.get_ui_label_mobile_phone(), "phone.value"),
                           ]
            # Optional columns, controlled by deployment settings
            if settings.get_hrm_use_trainings():
                list_fields.append("person_id$training.course_id")
            if settings.get_hrm_use_certificates():
                list_fields.append("person_id$certification.certificate_id")
            list_fields.append((T("Contract End Date"), "end_date"))
            list_fields.append("status")
        resource.configure(list_fields = list_fields)
        if r.interactive:
            if r.id:
                # Record view: delegate everything except profile/delete
                if r.method not in ("profile", "delete"):
                    # Redirect to person controller
                    vars = {
                        "human_resource.id": r.id,
                        "group": "staff"
                    }
                    args = []
                    if r.representation == "iframe":
                        vars["format"] = "iframe"
                        args = [r.method]
                    redirect(URL(f="person", vars=vars, args=args))
            else:
                if r.method == "import":
                    # Redirect to person controller
                    redirect(URL(f="person",
                                 args="import",
                                 vars={"group": "staff"}))
                elif not r.component and r.method != "delete":
                    # Configure site_id
                    field = table.site_id
                    site_id = get_vars.get("site_id", None)
                    if site_id:
                        # Site pre-selected via URL: fix it and lock the field
                        field.default = site_id
                        field.writable = False
                    field.comment = DIV(DIV(_class="tooltip",
                                            _title="%s|%s" % (
                                            settings.get_org_site_label(),
                                            T("The facility where this position is based."),
                                            #messages.AUTOCOMPLETE_HELP,
                                            )))
                    #field.comment = S3AddResourceLink(c="org", f="facility",
                    #                                  vars = dict(child="site_id",
                    #                                              parent="req"),
                    #                                  title=T("Add New Site"),
                    #                                  )
                    # Hide status field
                    table.status.writable = table.status.readable = False
                    # Assume staff only between 16-81
                    s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972,
                                                                       future=-192)
        elif r.representation == "xls":
            # Make it match Import sheets
            list_fields = s3db.get_config(tablename, "list_fields")
            # Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
            try:
                list_fields.remove("id")
            except ValueError:
                pass
            # Separate Facility Type from Facility Name
            table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
            # Insert the type column right after site_id (or at the end)
            i = 0
            for f in list_fields:
                i += 1
                if f == "site_id":
                    break
            list_fields.insert(i,
                               (T("Facility Type"),
                                "person_id$human_resource.site_id$instance_type"))
            # Split person_id into first/middle/last
            try:
                list_fields.remove("person_id")
            except ValueError:
                pass
            list_fields = ["person_id$first_name",
                           "person_id$middle_name",
                           "person_id$last_name",
                           ] + list_fields
            s3db.configure(tablename,
                           list_fields = list_fields)
        return True
    s3.prep = prep
    def postp(r, output):
        # Post-processor: runs after the REST method, may rewrite output
        if r.interactive:
            if not r.component:
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
                s3_action_buttons(r, deletable=settings.get_hrm_deletable())
                if "msg" in settings.modules and \
                   auth.permission.has_permission("update", c="hrm", f="compose"):
                    # @ToDo: Remove this now that we have it in Events?
                    s3.actions.append(
                        {"url": URL(f="compose",
                                    vars = {"human_resource.id": "[id]"}),
                         "_class": "action-btn send",
                         "label": str(T("Send Message"))
                        })
                #s3.scripts.append("/%s/static/scripts/jquery.doubleScroll.js" % appname)
                #s3.jquery_ready.append('''$('.dataTable_table').doubleScroll()''')
                #s3.jquery_ready.append('''$('.dataTables_wrapper').doubleScroll()''')
        elif r.representation == "plain":
            # Map Popups
            output = s3db.hrm_map_popup(r)
        return output
    s3.postp = postp
    return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person():
    """
        Person Controller
        - used for access to component Tabs, Personal Profile & Imports
        - includes components relevant to HRM
    """
    controller = s3db.hrm_person_controller
    return controller()
# -----------------------------------------------------------------------------
def profile():
    """
        Profile Controller
        - includes components relevant to HRM
    """
    # Force the request onto the logged-in user's own person record
    request.args = [str(s3_logged_in_person())]
    # Custom Method for Contacts
    s3db.set_method("pr", resourcename,
                    method = "contacts",
                    action = s3db.pr_contacts)
    if settings.has_module("asset"):
        # Assets as component of people
        s3db.add_components("pr_person",
                            asset_asset = "assigned_to_id",
                            )
    group = get_vars.get("group", "staff")
    # Configure human resource table
    tablename = "hrm_human_resource"
    table = s3db[tablename]
    # Default new HR records to Staff (type == 1)
    table.type.default = 1
    # Configure person table
    tablename = "pr_person"
    table = s3db[tablename]
    # Users may not delete their own person record
    s3db.configure(tablename,
                   deletable = False,
                   )
    # Configure for personal mode
    s3.crud_strings[tablename].update(
        title_display = T("Personal Profile"),
        title_update = T("Personal Profile"))
    # CRUD pre-process
    def prep(r):
        if r.interactive and r.method != "import":
            if r.component:
                if r.component_name == "physical_description":
                    # Hide all but those details that we want
                    # Lock all the fields
                    table = r.component.table
                    for field in table.fields:
                        table[field].writable = table[field].readable = False
                    # Now enable those that we want
                    table.ethnicity.writable = table.ethnicity.readable = True
                    table.blood_type.writable = table.blood_type.readable = True
                    table.medical_conditions.writable = table.medical_conditions.readable = True
                    table.other_details.writable = table.other_details.readable = True
            else:
                # Main person record: hide administrative fields
                table = r.table
                table.pe_label.readable = table.pe_label.writable = False
                table.missing.readable = table.missing.writable = False
                table.age_group.readable = table.age_group.writable = False
                # Assume volunteers only between 12-81
                table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
            return True
        else:
            # Disable non-interactive & import
            return False
    s3.prep = prep
    # CRUD post-process
    def postp(r, output):
        if r.interactive and r.component:
            if r.component_name == "human_resource":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
            if r.component_name == "experience":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
        return output
    s3.postp = postp
    output = s3_rest_controller("pr", "person",
                                rheader = s3db.hrm_rheader,
                                )
    return output
# -----------------------------------------------------------------------------
def hr_search():
    """
        Human Resource REST controller
        - limited to just search_ac for use in Autocompletes
        - allows differential access permissions
    """
    # Restrict the HR type when a group is given in the URL query
    type_for_group = {"staff": 1, "volunteer": 2}
    hr_type = type_for_group.get(get_vars.get("group", None))
    if hr_type is not None:
        s3.filter = FS("human_resource.type") == hr_type
    # Refuse every method except the autocomplete search
    s3.prep = lambda r: r.method == "search_ac"
    return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person_search():
    """
        Person REST controller
        - limited to just search_ac for use in Autocompletes
        - allows differential access permissions
    """
    # Restrict the HR type when a group is given in the URL query
    type_for_group = {"staff": 1, "volunteer": 2}
    hr_type = type_for_group.get(get_vars.get("group", None))
    if hr_type is not None:
        s3.filter = FS("human_resource.type") == hr_type
    # Refuse every method except the autocomplete search
    s3.prep = lambda r: r.method == "search_ac"
    return s3_rest_controller("pr", "person")
# =============================================================================
# Teams
# =============================================================================
def group():
    """
        Team controller
        - uses the group table from PR
    """
    controller = s3db.hrm_group_controller
    return controller()
# -----------------------------------------------------------------------------
def group_membership():
    """
        Membership controller
        - uses the group_membership table from PR
    """
    # Change Labels & list_fields
    s3db.hrm_configure_pr_group_membership()
    # Only show Relief Teams
    # Do not show system groups
    # Only show Staff
    table = db.pr_group_membership
    gtable = db.pr_group
    htable = s3db.hrm_human_resource
    # Join: non-system groups of type 3 (Relief Team) whose members
    # have a Staff (type == 1) HR record
    s3.filter = (gtable.system == False) & \
                (gtable.group_type == 3) & \
                (htable.type == 1) & \
                (htable.person_id == table.person_id)
    def prep(r):
        if r.method in ("create", "create.popup", "update", "update.popup"):
            # Coming from Profile page?
            person_id = get_vars.get("~.person_id", None)
            if person_id:
                # Fix the person and hide the selector
                field = table.person_id
                field.default = person_id
                field.readable = field.writable = False
        return True
    s3.prep = prep
    output = s3_rest_controller("pr", "group_membership",
                                csv_template="group_membership",
                                csv_stylesheet=("hrm", "group_membership.xsl"),
                                )
    return output
# =============================================================================
# Jobs
# =============================================================================
def department():
    """ Departments Controller """
    mode = session.s3.hrm.mode
    def prep(r):
        # Personal-profile mode has no access to department admin
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep
    if not auth.s3_has_role(ADMIN):
        # Non-admins only see their own root organisation's departments
        s3.filter = auth.filter_by_root_org(s3db.hrm_department)
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def job_title():
    """ Job Titles Controller """
    mode = session.s3.hrm.mode
    def prep(r):
        # Personal-profile mode has no access to job title admin
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep
    # Only job title types 1 & 3 are shown here
    query = FS("type").belongs((1, 3))
    if not auth.s3_has_role(ADMIN):
        # Non-admins additionally restricted to their root organisation
        query &= auth.filter_by_root_org(s3db.hrm_job_title)
    s3.filter = query
    return s3_rest_controller()
# =============================================================================
# Skills
# =============================================================================
def skill():
    """ Skills Controller """
    if session.s3.hrm.mode is not None:
        # No skill administration in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def skill_type():
    """ Skill Types Controller """
    if session.s3.hrm.mode is not None:
        # No skill-type administration in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def competency_rating():
    """ Competency Rating for Skill Types Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def skill_provision():
    """ Skill Provisions Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def course():
    """ Courses Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    if not auth.s3_has_role(ADMIN):
        # Non-admins only see courses of their own root organisation
        s3.filter = auth.filter_by_root_org(s3db.hrm_course)
    return s3_rest_controller(rheader=s3db.hrm_rheader)
# -----------------------------------------------------------------------------
def course_certificate():
    """ Courses to Certificates Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def certificate():
    """ Certificates Controller """
    mode = session.s3.hrm.mode
    def prep(r):
        # Personal-profile mode has no access to certificate admin
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep
    # Optionally restrict non-admins to their root organisation's certificates
    if settings.get_hrm_filter_certificates() and \
       not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_certificate)
    return s3_rest_controller(rheader=s3db.hrm_rheader)
# -----------------------------------------------------------------------------
def certificate_skill():
    """ Certificates to Skills Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def training():
    """ Training Controller - used for Searching for Participants """
    # Restrict to Staff (HR type == 1) records only
    staff_only = FS("person_id$human_resource.type") == 1
    s3.filter = staff_only
    return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def training_event():
    """ Training Events Controller: thin wrapper around the shared model-layer controller """
    return s3db.hrm_training_event_controller()
# -----------------------------------------------------------------------------
def credential():
    """ Credentials Controller """
    # Restrict to Staff (HR type == 1) records only
    staff_only = FS("person_id$human_resource.type") == 1
    s3.filter = staff_only
    return s3db.hrm_credential_controller()
# -----------------------------------------------------------------------------
def experience():
    """ Experience Controller """
    # Restrict to Staff (HR type == 1) records only
    staff_only = FS("person_id$human_resource.type") == 1
    s3.filter = staff_only
    return s3db.hrm_experience_controller()
# -----------------------------------------------------------------------------
def competency():
    """
        RESTful CRUD controller used to allow searching for people by Skill
    """
    # Restrict to Staff (HR type == 1) records only
    staff_only = FS("person_id$human_resource.type") == 1
    s3.filter = staff_only
    return s3db.hrm_competency_controller()
# =============================================================================
def skill_competencies():
    """
        Called by S3OptionsFilter to provide the competency options for a
        particular Skill Type

        request.args[0] is the hrm_skill record ID; returns the matching
        competency ratings as JSON, highest priority first.
    """
    table = s3db.hrm_skill
    ttable = s3db.hrm_skill_type
    rtable = s3db.hrm_competency_rating
    # Join skill -> skill type -> ratings for that skill type
    query = (table.id == request.args[0]) & \
            (table.skill_type_id == ttable.id) & \
            (rtable.skill_type_id == table.skill_type_id)
    records = db(query).select(rtable.id,
                               rtable.name,
                               orderby=~rtable.priority)
    response.headers["Content-Type"] = "application/json"
    return records.json()
# =============================================================================
def staff_org_site_json():
    """
        Used by the Asset - Assign to Person page

        request.args[0] is a pr_person record ID; returns the person's
        HR site(s) with the owning organisation as JSON.
    """
    table = s3db.hrm_human_resource
    otable = s3db.org_organisation
    # Join HR records for this person to their organisation
    query = (table.person_id == request.args[0]) & \
            (table.organisation_id == otable.id)
    records = db(query).select(table.site_id,
                               otable.id,
                               otable.name)
    response.headers["Content-Type"] = "application/json"
    return records.json()
# =============================================================================
def staff_for_site():
    """
        Used by the Req/Req/Create page
        - note that this returns Person IDs

        request.args[0] is the site ID; returns a JSON list of
        {"id": person_id, "name": full name} for active staff at that site,
        or a 400 JSON error message when no site was provided.
    """
    try:
        site_id = request.args[0]
    except IndexError:
        # Was a bare "except:"; only a missing URL argument should be
        # trapped here - a bare except also swallows e.g. KeyboardInterrupt
        result = current.xml.json_message(False, 400, "No Site provided!")
    else:
        table = s3db.hrm_human_resource
        ptable = db.pr_person
        # Active staff at this site: not deleted, status == 1 and
        # contract either open-ended or not yet expired
        query = (table.site_id == site_id) & \
                (table.deleted == False) & \
                (table.status == 1) & \
                ((table.end_date == None) | \
                 (table.end_date > request.utcnow)) & \
                (ptable.id == table.person_id)
        rows = db(query).select(ptable.id,
                                ptable.first_name,
                                ptable.middle_name,
                                ptable.last_name,
                                orderby=ptable.first_name)
        result = json.dumps([{"id" : row.id,
                              "name" : s3_fullname(row)
                              } for row in rows])
    response.headers["Content-Type"] = "application/json"
    return result
# =============================================================================
# Salaries
# =============================================================================
def staff_level():
    """ Staff Levels Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
def salary_grade():
    """ Salary Grade Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# =============================================================================
# Insurance Information
# =============================================================================
def insurance():
    """ Insurance Information Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# =============================================================================
# Awards
# =============================================================================
def award_type():
    """ Award Type Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
def award():
    """ Awards Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# =============================================================================
# Disciplinary Record
# =============================================================================
def disciplinary_type():
    """ Disciplinary Type Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
def disciplinary_action():
    """ Disciplinary Action Controller """
    if session.s3.hrm.mode is not None:
        # Admin-style view: blocked in personal-profile mode
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# =============================================================================
# Messaging
# =============================================================================
def compose():
    """ Send message to people/teams: thin wrapper around the shared model-layer helper """
    return s3db.hrm_compose()
# END =========================================================================
| |
from django.conf.urls import url
from django.db.models.query_utils import Q
from django.http.response import HttpResponse
from tastypie.bundle import Bundle
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
from tastypie.utils.mime import build_content_type
from cvservices.models import ControlledVocabulary
from .models import Scheme, FieldRelation
from rdflib import Graph, URIRef, Literal
from rdflib import Namespace
from rdflib.namespace import SKOS, RDF
from collections import OrderedDict
import csv
import StringIO
class RdfSerializer(Serializer):
    """tastypie Serializer with additional SKOS (RDF/XML) and CSV formats.

    ``to_csv`` flattens each vocabulary concept into an ordered CSV row;
    ``to_skos`` renders the vocabulary as a SKOS concept scheme.
    Written for Python 2 (``StringIO`` module, byte-string encoding of
    CSV values).

    Fixes vs. the previous revision:
      * ``flatten`` no longer uses a mutable default argument (the default
        dict was shared between calls).
      * the duplicated per-branch row-building code in ``to_csv`` was
        extracted into ``_ordered_row``; a redundant duplicate
        ``csv.DictWriter`` construction and a dead ``else`` branch in the
        single-term path were removed.  Output is unchanged.
    """
    formats = ['json', 'skos', 'csv']
    content_types = {
        'json': 'application/json',
        'skos': 'text/plain',
        'csv': 'text/csv'
    }
    # Mandatory CSV columns, in output order (a missing key raises
    # KeyError, matching the original behaviour).
    _LEADING_FIELDS = ('term', 'name', 'definition', 'category',
                       'provenance', 'provenance_uri', 'note')
    # Optional columns (only some vocabularies, e.g. units, have them),
    # emitted in this order when present.
    _OPTIONAL_FIELDS = ('default_unit', 'dimension_symbol',
                        'dimension_length', 'dimension_mass',
                        'dimension_time', 'dimension_current',
                        'dimension_temperature', 'dimension_amount',
                        'dimension_light')
    def _ordered_row(self, record, excluded_fields):
        """Build an OrderedDict CSV row from one serialized concept.

        Deletes ``excluded_fields`` from *record* (in place), flattens any
        nested dicts/lists, orders the known columns first and appends any
        remaining keys afterwards.
        """
        flat = {}
        for excluded_field in excluded_fields:
            del record[excluded_field]
        self.flatten(record, flat)
        row = OrderedDict()
        for key in self._LEADING_FIELDS:
            row[key] = flat.pop(key)
        for key in self._OPTIONAL_FIELDS:
            if key in flat:
                row[key] = flat.pop(key)
        row.update(flat)
        return row
    @staticmethod
    def _encoded(row):
        """Return *row* with string values utf-8 encoded.

        ints and Nones pass through unchanged - the Python 2 csv writer
        cannot handle unencoded unicode.
        """
        return {k: (v.encode('utf-8')
                    if isinstance(v, int) is not True and
                    isinstance(v, type(None)) is not True
                    else v)
                for k, v in row.items()}
    def to_csv(self, data, options=None):
        """Serialize *data* (a whole vocabulary or a single term) to CSV."""
        options = options or {}
        excluded_fields = [u'vocabulary_id', u'vocabulary_status', u'resource_uri']
        raw_data = StringIO.StringIO()
        data = self.to_simple(data, options)
        if "meta" in data.keys():
            # Entire CV: one row per concept.
            first = True
            for value in data.get("objects"):
                odict = self._ordered_row(value, excluded_fields)
                # Rebuild the writer per row: optional columns may differ
                # between rows.
                writer = csv.DictWriter(raw_data, odict.keys())
                if first:
                    writer.writeheader()
                    # NOTE(review): the first data row is written unencoded
                    # while later rows are utf-8 encoded - preserved as-is
                    # from the original implementation.
                    writer.writerow(odict)
                    first = False
                else:
                    writer.writerow(self._encoded(odict))
        else:
            # Single term: header plus one encoded row.
            odict = self._ordered_row(data, excluded_fields)
            writer = csv.DictWriter(raw_data, odict.keys())
            writer.writeheader()
            writer.writerow(self._encoded(odict))
        return raw_data.getvalue()
    def flatten(self, data, odict=None):
        """Recursively copy scalar values from nested dicts/lists in *data*
        into *odict*; deeper keys overwrite shallower ones of the same name.
        """
        if odict is None:
            odict = {}
        if isinstance(data, list):
            for value in data:
                self.flatten(value, odict)
        elif isinstance(data, dict):
            for (key, value) in data.items():
                if not isinstance(value, (dict, list)):
                    odict[key] = value
                else:
                    self.flatten(value, odict)
    def to_skos(self, data, options=None):
        """
        Given some data, converts that data to an rdf skos format in xml.
        """
        # Scheme of the resource being requested: actionTypeCV, methodTypeCV, etc.
        scheme = Scheme.objects.get(name=options['scheme'])
        excluded_fields = [u'term', u'resource_uri', u'vocabulary_id', u'vocabulary_status']
        baseURI = 'http://vocabulary.odm2.org/ODM2/ODM2Terms/'
        graph = Graph()
        odm2 = Namespace(baseURI)
        dc = Namespace('http://purl.org/dc/elements/1.1/')
        graph.bind('odm2', odm2)
        graph.bind('skos', SKOS)
        graph.bind('dc', dc)
        # If requesting an entire CV.
        if isinstance(data, dict):
            # Add a SKOS ConceptScheme class to the graph.
            (graph.add((URIRef(scheme.uri), RDF['type'],
                        SKOS['ConceptScheme'])))
            (graph.add((URIRef(scheme.uri), dc['title'],
                        Literal(scheme.title))))
            (graph.add((URIRef(scheme.uri), dc['creator'],
                        Literal(scheme.creator))))
            (graph.add((URIRef(scheme.uri), dc['description'],
                        Literal(scheme.description))))
            # For each concept in the requested CV, create a SKOS Concept class.
            for concept in data[u'objects']:
                (graph.add((URIRef(scheme.uri + '/' + concept.obj.term),
                            RDF['type'], SKOS['Concept'])))
                (graph.add((URIRef(scheme.uri + '/' + concept.obj.term),
                            SKOS['inScheme'], URIRef(scheme.uri))))
                # Add labels to each concept class.
                for x in concept.data:
                    label = concept.data[x]
                    if isinstance(label, type(None)):
                        label = ''
                    if isinstance(label, int):
                        label = str(label)
                    # Skip excluded field elements.
                    if x in excluded_fields:
                        continue
                    # Skip empty elements.
                    elif label.rstrip('\r\n') == '':
                        continue
                    else:
                        # Emit the field in the namespace mapped by FieldRelation.
                        alias = str(FieldRelation.objects.get(
                            field_name=x).node.namespace)
                        if alias == 'odm2':
                            (graph.add((URIRef(scheme.uri + '/' +
                                               concept.obj.term),
                                        odm2[FieldRelation.objects
                                             .get(field_name=x).node.name],
                                        Literal(
                                            label.rstrip('\r\n')))))
                        else:
                            (graph.add((URIRef(scheme.uri + '/' +
                                               concept.obj.term),
                                        SKOS[FieldRelation.objects
                                             .get(field_name=x).node.name],
                                        Literal(label.rstrip('\r\n')))))
        # If requesting a single Concept
        elif isinstance(data, Bundle):
            # Add a SKOS ConceptScheme class to the graph.
            (graph.add((URIRef(scheme.uri), RDF['type'],
                        SKOS['ConceptScheme'])))
            (graph.add((URIRef(scheme.uri), dc['title'],
                        Literal(scheme.title))))
            (graph.add((URIRef(scheme.uri), dc['creator'],
                        Literal(scheme.creator))))
            (graph.add((URIRef(scheme.uri), dc['description'],
                        Literal(scheme.description))))
            # Add a SKOS Concept class to the graph.
            (graph.add((URIRef(scheme.uri + '/' + data.obj.term),
                        RDF['type'], SKOS['Concept'])))
            (graph.add((URIRef(scheme.uri + '/' + data.obj.term),
                        SKOS['inScheme'], URIRef(scheme.uri))))
            # Add labels within concept class.
            for field in data.data.keys():
                label = data.data[field]
                if isinstance(label, type(None)):
                    label = ''
                if isinstance(label, int):
                    label = str(label)
                if field in excluded_fields:
                    continue
                elif label.rstrip('\r\n') == '':
                    continue
                else:
                    relation = FieldRelation.objects.get(field_name=field)
                    alias = relation.node.namespace.alias
                    if alias == u'odm2':
                        (graph.add((URIRef(scheme.uri + '/' + data.obj.term),
                                    odm2[FieldRelation.objects
                                         .get(field_name=field).node.name],
                                    Literal(label.rstrip('\r\n')))))
                    else:
                        (graph.add((URIRef(scheme.uri + '/' + data.obj.term),
                                    SKOS[FieldRelation.objects
                                         .get(field_name=field).node.name],
                                    Literal(label.rstrip('\r\n')))))
        else:
            pass
        # Returning the graph serialized into 'xml' format rather than
        # 'pretty-xml' so that the Concept Scheme remains on its own level,
        # rather than inside one of the concepts.
        return graph.serialize(format='xml')
class ModelRdfResource(ModelResource):
    # Vocabulary scheme name passed to the serializer; subclasses set this.
    scheme = None
    # Query filter limiting results to "current" vocabulary entries.
    vocabulary_filter = Q(vocabulary_status=ControlledVocabulary.CURRENT)
    class Meta:
        max_limit = 0
        detail_uri_name = 'term'
        serializer = RdfSerializer()
    def prepend_urls(self):
        # Route detail URLs on the concept's term instead of the numeric pk.
        return [
            url(r'^(?P<resource_name>%s)/(?P<term>[\w\.-]+)/$' % self._meta.resource_name,
                self.wrap_view('dispatch_detail'), name='api_dispatch_detail'),
        ]
    def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
        """
        Extracts the common "which-format/serialize/return-response" cycle.
        Mostly a useful shortcut/hook.

        Overridden to pass the resource's scheme through to the serializer
        (needed by the SKOS format).
        """
        desired_format = self.determine_format(request)
        serialized = (
            self.serialize(request, data, desired_format,
                           options={'scheme': self.scheme}))
        return (response_class(content=serialized,
                               content_type=build_content_type(desired_format),
                               **response_kwargs))
| |
# Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
# SMI-S group type codes (values from the EMC SMI-S provider)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
# Interop namespace of the ECOM server
EMC_ROOT = 'root/emc'
# ElementType values for volume creation; THINPROVISIONING (5) is used
# below in create_volume_from_sg
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
INFO_SRC_V3 = 3
# SnapVX operation codes -- presumably activate/deactivate and the sync
# type for snapshot sessions (TODO confirm against the SMI-S spec)
ACTIVATESNAPVX = 4
DEACTIVATESNAPVX = 19
SNAPSYNCTYPE = 7
class EMCVMAXProvisionV3(object):
"""Provisioning Class for SMI-S based EMC volume drivers.
This Provisioning class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
    def __init__(self, prtcl):
        # prtcl: storage protocol used by the driver (passed through to utils)
        self.protocol = prtcl
        self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
    def delete_volume_from_pool(
            self, conn, storageConfigservice, volumeInstanceName, volumeName,
            extraSpecs):
        """Given the volume instance remove it from the pool.

        :param conn: connection to the ecom server
        :param storageConfigservice: volume created from job
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param extraSpecs: additional info
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        # A list of instance names means a bulk delete of several volumes
        if isinstance(volumeInstanceName, list):
            theElements = volumeInstanceName
            volumeName = 'Bulk Delete'
        else:
            theElements = [volumeInstanceName]
        rc, job = conn.InvokeMethod(
            'ReturnElementsToStoragePool', storageConfigservice,
            TheElements=theElements)
        # Non-zero rc means the call went asynchronous: wait for the job
        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Delete Volume: %(volumeName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'volumeName': volumeName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        LOG.debug("InvokeMethod ReturnElementsToStoragePool took: "
                  "%(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return rc
    def create_volume_from_sg(
            self, conn, storageConfigService, volumeName,
            sgInstanceName, volumeSize, extraSpecs):
        """Create the volume and associate it with a storage group.

        We use EMCCollections parameter to supply a Device Masking Group
        to contain a newly created storage volume.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage configuration service
        :param volumeName: the volume name (String)
        :param sgInstanceName: the storage group instance name
            associated with an SLO
        :param volumeSize: volume size (String)
        :param extraSpecs: additional info
        :returns: dict -- volumeDict - the volume dict
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        try:
            storageGroupInstance = conn.GetInstance(sgInstanceName)
        except Exception:
            exceptionMessage = (_(
                "Unable to get the name of the storage group"))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

        # Serialize creation per storage group: the lock name is derived
        # from the group's ElementName
        @lockutils.synchronized(storageGroupInstance['ElementName'],
                                "emc-sg-", True)
        def do_create_volume_from_sg():
            startTime = time.time()

            rc, job = conn.InvokeMethod(
                'CreateOrModifyElementFromStoragePool',
                storageConfigService, ElementName=volumeName,
                EMCCollections=[sgInstanceName],
                ElementType=self.utils.get_num(THINPROVISIONING, '16'),
                Size=self.utils.get_num(volumeSize, '64'))

            LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.",
                      {'volumename': volumeName,
                       'rc': rc})
            # Non-zero rc means the call went asynchronous: wait for the job
            if rc != 0:
                rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                                 extraSpecs)
                if rc != 0:
                    exceptionMessage = (_(
                        "Error Create Volume: %(volumeName)s. "
                        "Return code: %(rc)lu. Error: %(error)s.")
                        % {'volumeName': volumeName,
                           'rc': rc,
                           'error': errordesc})
                    LOG.error(exceptionMessage)
                    raise exception.VolumeBackendAPIException(
                        data=exceptionMessage)

            LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
                      "took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(startTime,
                                                          time.time())})
            # Find the newly created volume.
            volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
            return volumeDict, rc
        return do_create_volume_from_sg()
def _find_new_storage_group(
self, conn, maskingGroupDict, storageGroupName):
"""After creating an new storage group find it and return it.
:param conn: connection to the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: maskingGroupDict['MaskingGroup'] or None
"""
foundStorageGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundStorageGroupInstanceName
def get_volume_dict_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return self.create_volume_dict(associators[0].path)
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def get_volume_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return associators[0]
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def create_volume_dict(self, volumeInstanceName):
"""Create volume dictionary
:param volumeInstanceName: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
volpath = volumeInstanceName
volumeDict = {}
volumeDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
volumeDict['keybindings'] = keys
return volumeDict
    def create_element_replica(
            self, conn, repServiceInstanceName,
            cloneName, syncType, sourceInstance, extraSpecs,
            copyState=None, targetInstance=None, rsdInstance=None):
        """Make SMI-S call to create replica for source element.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: replication service
        :param cloneName: clone volume name
        :param syncType: 7=snapshot, 8=clone
        :param sourceInstance: source volume instance
        :param extraSpecs: additional info
        :param copyState: wait for copy state
        :param targetInstance: Target volume instance. Default None
        :param rsdInstance: replication settingdata instance. Default None
        :returns: int -- rc - return code
        :returns: job - job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        LOG.debug("Create replica: %(clone)s "
                  "syncType: %(syncType)s Source: %(source)s.",
                  {'clone': cloneName,
                   'syncType': syncType,
                   'source': sourceInstance.path})
        storageSystemName = sourceInstance['SystemName']
        # Resolve the default V3 storage group for the pool/SLO/workload
        # combination; its name is used both as the lock key and as the
        # Collections argument of the replica call.
        __, __, sgInstanceName = (
            self.utils.get_v3_default_sg_instance_name(
                conn, extraSpecs[self.utils.POOL],
                extraSpecs[self.utils.SLO],
                extraSpecs[self.utils.WORKLOAD], storageSystemName))
        try:
            storageGroupInstance = conn.GetInstance(sgInstanceName)
        except Exception:
            exceptionMessage = (_(
                "Unable to get the name of the storage group"))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)
        # Serialize on the storage group name so concurrent replica
        # operations against the same group do not race.
        @lockutils.synchronized(storageGroupInstance['ElementName'],
                                "emc-sg-", True)
        def do_create_element_replica():
            if targetInstance is None and rsdInstance is None:
                rc, job = conn.InvokeMethod(
                    'CreateElementReplica', repServiceInstanceName,
                    ElementName=cloneName,
                    SyncType=self.utils.get_num(syncType, '16'),
                    SourceElement=sourceInstance.path,
                    Collections=[sgInstanceName])
            else:
                # Target volume and/or replication setting data supplied:
                # delegate to the extra-parameters variant.
                rc, job = self._create_element_replica_extra_params(
                    conn, repServiceInstanceName, cloneName, syncType,
                    sourceInstance, targetInstance, rsdInstance,
                    sgInstanceName, copyState)
            if rc != 0:
                # Non-zero rc: wait for the async job to finish and
                # re-check the definitive return code.
                rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                                 extraSpecs)
                if rc != 0:
                    exceptionMessage = (_(
                        "Error Create Cloned Volume: %(cloneName)s "
                        "Return code: %(rc)lu. Error: %(error)s.")
                        % {'cloneName': cloneName,
                           'rc': rc,
                           'error': errordesc})
                    LOG.error(exceptionMessage)
                    raise exception.VolumeBackendAPIException(
                        data=exceptionMessage)
            LOG.debug("InvokeMethod CreateElementReplica "
                      "took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(startTime,
                                                          time.time())})
            return rc, job
        return do_create_element_replica()
def _create_element_replica_extra_params(
self, conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rsdInstance, sgInstanceName,
copyState):
"""CreateElementReplica using extra parameters.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service
:param cloneName: clone volume name
:param syncType: 7=snapshot, 8=clone
:param sourceInstance: source volume instance
:param targetInstance: Target volume instance. Default None
:param rsdInstance: replication settingdata instance. Default None
:param sgInstanceName: pool instance name
:param copyState: wait for copy state
:returns: int -- rc - return code
:returns: job - job object of the replica creation operation
"""
syncType = self.utils.get_num(syncType, '16')
if targetInstance and rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path,
ReplicationSettingData=rsdInstance)
elif targetInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path,
WaitForCopyState=copyState)
elif rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
ReplicationSettingData=rsdInstance,
Collections=[sgInstanceName],
WaitForCopyState=copyState)
return rc, job
    def break_replication_relationship(
            self, conn, repServiceInstanceName, syncInstanceName,
            operation, extraSpecs, force=False):
        """Deletes the relationship between the clone/snap and source volume.

        Makes an SMI-S call to break clone relationship between the clone
        volume and the source. Thin wrapper around
        _modify_replica_synchronization.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param operation: operation code
        :param extraSpecs: additional info
        :param force: force to break replication relationship if True
        :returns: rc - return code
        :returns: job - job object of the replica creation operation
        """
        LOG.debug("Break replication relationship: %(sv)s "
                  "operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})
        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs, force)
def create_storage_group_v3(self, conn, controllerConfigService,
groupName, srp, slo, workload, extraSpecs):
"""Create the volume in the specified pool.
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
:param groupName: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extraSpecs: additional info
:returns: storageGroupInstanceName - storage group instance name
"""
startTime = time.time()
@lockutils.synchronized(groupName, "emc-sg-", True)
def do_create_storage_group_v3():
rc, job = conn.InvokeMethod(
'CreateGroup',
controllerConfigService,
GroupName=groupName,
Type=self.utils.get_num(4, '16'),
EMCSRP=srp,
EMCSLO=slo,
EMCWorkload=workload)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, job, extraSpecs)
if rc != 0:
LOG.error(_LE(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s."),
{'groupName': groupName,
'rc': rc,
'error': errordesc})
raise
LOG.debug("InvokeMethod CreateGroup "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
foundStorageGroupInstanceName = self._find_new_storage_group(
conn, job, groupName)
return foundStorageGroupInstanceName
return do_create_storage_group_v3()
def get_storage_pool_capability(self, conn, poolInstanceName):
"""Get the pool capability.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance
:returns: the storage pool capability instance. None if not found
"""
storagePoolCapability = None
associators = (
conn.AssociatorNames(poolInstanceName,
ResultClass='Symm_StoragePoolCapabilities'))
if len(associators) > 0:
storagePoolCapability = associators[0]
return storagePoolCapability
def get_storage_pool_setting(
self, conn, storagePoolCapability, slo, workload):
"""Get the pool setting for pool capability.
:param conn: the connection information to the ecom server
:param storagePoolCapability: the storage pool capability instance
:param slo: the slo string e.g Bronze
:param workload: the workload string e.g DSS_REP
:returns: the storage pool setting instance
"""
foundStoragePoolSetting = None
storagePoolSettings = (
conn.AssociatorNames(storagePoolCapability,
ResultClass='CIM_storageSetting'))
for storagePoolSetting in storagePoolSettings:
settingInstanceID = storagePoolSetting['InstanceID']
matchString = ("%(slo)s:%(workload)s"
% {'slo': slo,
'workload': workload})
if matchString in settingInstanceID:
foundStoragePoolSetting = storagePoolSetting
break
if foundStoragePoolSetting is None:
exceptionMessage = (_(
"The array does not support the storage pool setting "
"for SLO %(slo)s and workload %(workload)s. Please "
"check the array for valid SLOs and workloads.")
% {'slo': slo,
'workload': workload})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return foundStoragePoolSetting
    def _get_supported_size_range_for_SLO(
            self, conn, storageConfigService,
            srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
        """Gets available performance capacity per SLO.

        NOTE: storageConfigService is accepted for interface symmetry with
        the sibling helpers but is not used in this body; the method is
        invoked directly on the SRP pool instance.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage configuration service instance
        :param srpPoolInstanceName: the SRP storage pool instance
        :param storagePoolSettingInstanceName: the SLO type, e.g Bronze
        :param extraSpecs: additional info
        :returns: dict -- supportedSizeDict - the supported size dict
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        rc, supportedSizeDict = conn.InvokeMethod(
            'GetSupportedSizeRange',
            srpPoolInstanceName,
            ElementType=self.utils.get_num(3, '16'),
            Goal=storagePoolSettingInstanceName)
        if rc != 0:
            # Non-zero rc: wait for the async job (the output dict carries
            # the job reference) and re-check the final return code.
            rc, errordesc = self.utils.wait_for_job_complete(
                conn, supportedSizeDict, extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Cannot get supported size range for %(sps)s "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'sps': storagePoolSettingInstanceName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        LOG.debug("InvokeMethod GetSupportedSizeRange "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return supportedSizeDict
def get_volume_range(
self, conn, storageConfigService, poolInstanceName, slo, workload,
extraSpecs):
"""Get upper and lower range for volume for slo/workload combination.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service
:param poolInstanceName: the pool instance
:param slo: slo string e.g Bronze
:param workload: workload string e.g DSS
:param extraSpecs: additional info
:returns: supportedSizeDict
"""
supportedSizeDict = {}
storagePoolCapabilityInstanceName = self.get_storage_pool_capability(
conn, poolInstanceName)
if storagePoolCapabilityInstanceName:
storagePoolSettingInstanceName = self.get_storage_pool_setting(
conn, storagePoolCapabilityInstanceName, slo, workload)
supportedSizeDict = self._get_supported_size_range_for_SLO(
conn, storageConfigService, poolInstanceName,
storagePoolSettingInstanceName, extraSpecs)
return supportedSizeDict
    def activate_snap_relationship(
            self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
        """Activate snap relationship and start copy operation.

        Thin wrapper delegating to _modify_replica_synchronization with
        the ACTIVATESNAPVX operation code.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param extraSpecs: additional info
        :returns: int -- return code
        :returns: job object of the replica creation operation
        """
        # Operation 4: activate the snapVx.
        operation = ACTIVATESNAPVX
        LOG.debug("Activate snap: %(sv)s operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})
        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs)
    def return_to_resource_pool(self, conn, repServiceInstanceName,
                                syncInstanceName, extraSpecs):
        """Return the snap target resources back to the pool.

        Thin wrapper delegating to _modify_replica_synchronization with
        the DEACTIVATESNAPVX operation code.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param extraSpecs: additional info
        :returns: rc - return code
        :returns: job object of the replica creation operation
        """
        # Deactivate the snapVx so its target resources are released back
        # to the pool (the original comment said "activate" — copy/paste).
        operation = DEACTIVATESNAPVX
        LOG.debug("Return snap resource back to pool: "
                  "%(sv)s operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})
        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs)
    def _modify_replica_synchronization(
            self, conn, repServiceInstanceName, syncInstanceName,
            operation, extraSpecs, force=False):
        """Modify the relationship between the clone/snap and source volume.

        Helper function that makes an SMI-S call to break clone relationship
        between the clone volume and the source.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param operation: operation code
        :param extraSpecs: additional info
        :param force: force to modify replication synchronization if True
        :returns: int -- return code
        :returns: job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        rc, job = conn.InvokeMethod(
            'ModifyReplicaSynchronization', repServiceInstanceName,
            Operation=self.utils.get_num(operation, '16'),
            Synchronization=syncInstanceName,
            Force=force)
        LOG.debug("_modify_replica_synchronization: %(sv)s "
                  "operation: %(operation)s Return code: %(rc)lu.",
                  {'sv': syncInstanceName, 'operation': operation, 'rc': rc})
        if rc != 0:
            # Non-zero rc: wait for the async job and re-check the
            # definitive return code before raising.
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error modify replica synchronization: %(sv)s "
                    "operation: %(operation)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'sv': syncInstanceName, 'operation': operation,
                       'rc': rc, 'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        LOG.debug("InvokeMethod ModifyReplicaSynchronization "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return rc, job
    def create_group_replica(
            self, conn, replicationService,
            srcGroupInstanceName, tgtGroupInstanceName, relationName,
            extraSpecs):
        """Make SMI-S call to create replica for source group.

        :param conn: the connection to the ecom server
        :param replicationService: replication service
        :param srcGroupInstanceName: source group instance name
        :param tgtGroupInstanceName: target group instance name
        :param relationName: replica relationship name
        :param extraSpecs: additional info
        :returns: int -- return code
        :returns: job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        LOG.debug(
            "Creating CreateGroupReplica V3: "
            "replicationService: %(replicationService)s "
            "RelationName: %(relationName)s "
            "sourceGroup: %(srcGroup)s "
            "targetGroup: %(tgtGroup)s.",
            {'replicationService': replicationService,
             'relationName': relationName,
             'srcGroup': srcGroupInstanceName,
             'tgtGroup': tgtGroupInstanceName})
        # Snapshot-type group replica (SNAPSYNCTYPE), passed as uint16.
        rc, job = conn.InvokeMethod(
            'CreateGroupReplica',
            replicationService,
            RelationshipName=relationName,
            SourceGroup=srcGroupInstanceName,
            TargetGroup=tgtGroupInstanceName,
            SyncType=self.utils.get_num(SNAPSYNCTYPE, '16'))
        if rc != 0:
            # Non-zero rc: wait for the async job and re-check the
            # definitive return code before raising.
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMsg = (_("Error CreateGroupReplica: "
                                  "source: %(source)s target: %(target)s. "
                                  "Return code: %(rc)lu. Error: %(error)s.")
                                % {'source': srcGroupInstanceName,
                                   'target': tgtGroupInstanceName,
                                   'rc': rc,
                                   'error': errordesc})
                LOG.error(exceptionMsg)
                raise exception.VolumeBackendAPIException(data=exceptionMsg)
        return rc, job
    def get_srp_pool_stats(self, conn, arrayInfo):
        """Get the totalManagedSpace, remainingManagedSpace.

        Capacity can be got in 2 ways depending on your configuration.
        1. The total capacity of the SRP, if you don't have WLP enabled.
        2. The SLO capacity, if you do

        :param conn: the connection to the ecom server
        :param arrayInfo: the array dict
        :returns: totalCapacityGb
        :returns: remainingCapacityGb
        :returns: subscribedCapacityGb
        :returns: array_reserve_percent
        """
        # -1 sentinels mean "could not be determined".
        totalCapacityGb = -1
        remainingCapacityGb = -1
        subscribedCapacityGb = -1
        array_reserve_percent = -1
        storageSystemInstanceName = self.utils.find_storageSystem(
            conn, arrayInfo['SerialNumber'])
        srpPoolInstanceNames = conn.AssociatorNames(
            storageSystemInstanceName,
            ResultClass='Symm_SRPStoragePool')
        for srpPoolInstanceName in srpPoolInstanceNames:
            poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName)
            # Only the pool configured for this backend is inspected.
            if six.text_type(arrayInfo['PoolName']) == (
                    six.text_type(poolnameStr)):
                try:
                    # Check that pool hasn't suddenly been deleted.
                    # Deliberate best-effort: any failure leaves the -1
                    # sentinels in place rather than aborting stats.
                    srpPoolInstance = conn.GetInstance(srpPoolInstanceName)
                    propertiesList = srpPoolInstance.properties.items()
                    for properties in propertiesList:
                        if properties[0] == 'TotalManagedSpace':
                            cimProperties = properties[1]
                            totalManagedSpace = cimProperties.value
                            totalCapacityGb = self.utils.convert_bits_to_gbs(
                                totalManagedSpace)
                        elif properties[0] == 'RemainingManagedSpace':
                            cimProperties = properties[1]
                            remainingManagedSpace = cimProperties.value
                            remainingCapacityGb = (
                                self.utils.convert_bits_to_gbs(
                                    remainingManagedSpace))
                        elif properties[0] == 'EMCSubscribedCapacity':
                            cimProperties = properties[1]
                            subscribedManagedSpace = cimProperties.value
                            subscribedCapacityGb = (
                                self.utils.convert_bits_to_gbs(
                                    subscribedManagedSpace))
                        elif properties[0] == 'EMCPercentReservedCapacity':
                            cimProperties = properties[1]
                            array_reserve_percent = int(cimProperties.value)
                except Exception:
                    pass
                # Prefer the WLP-derived SLO capacity when it is available;
                # otherwise fall back to the raw SRP pool capacity.
                remainingSLOCapacityGb = (
                    self._get_remaining_slo_capacity_wlp(
                        conn, srpPoolInstanceName, arrayInfo,
                        storageSystemInstanceName['Name']))
                if remainingSLOCapacityGb != -1:
                    remainingCapacityGb = remainingSLOCapacityGb
                else:
                    LOG.warning(_LW(
                        "Remaining capacity %(remainingCapacityGb)s "
                        "GBs is determined from SRP pool capacity "
                        "and not the SLO capacity. Performance may "
                        "not be what you expect."),
                        {'remainingCapacityGb': remainingCapacityGb})
        return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb,
                array_reserve_percent)
    def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName,
                                        arrayInfo, systemName):
        """Get the remaining SLO capacity.

        This is derived from the WLP portion of Unisphere. Please
        see the SMIProvider doc and the readme doc for details.

        :param conn: the connection to the ecom server
        :param srpPoolInstanceName: SRP instance name
        :param arrayInfo: the array dict
        :param systemName: the system name
        :returns: remainingCapacityGb (-1 when not available)
        """
        remainingCapacityGb = -1
        storageConfigService = (
            self.utils.find_storage_configuration_service(
                conn, systemName))
        # NOTE(review): extraSpecs is passed as None here — presumably safe
        # because this is a read-only query; confirm wait_for_job_complete
        # tolerates None if the underlying call goes async.
        supportedSizeDict = (
            self.get_volume_range(
                conn, storageConfigService, srpPoolInstanceName,
                arrayInfo['SLO'], arrayInfo['Workload'],
                None))
        try:
            # Information source is V3.
            if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3:
                remainingCapacityGb = self.utils.convert_bits_to_gbs(
                    supportedSizeDict['EMCRemainingSLOCapacity'])
                LOG.debug("Received remaining SLO Capacity "
                          "%(remainingCapacityGb)s GBs for SLO "
                          "%(SLO)s and workload %(workload)s.",
                          {'remainingCapacityGb': remainingCapacityGb,
                           'SLO': arrayInfo['SLO'],
                           'workload': arrayInfo['Workload']})
        except KeyError:
            # Missing keys simply mean WLP data is unavailable; keep -1.
            pass
        return remainingCapacityGb
    def extend_volume_in_SG(
            self, conn, storageConfigService, volumeInstanceName,
            volumeName, volumeSize, extraSpecs):
        """Extend a volume instance.

        :param conn: connection to the ecom server
        :param storageConfigService: the storage configuration service
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param volumeSize: the volume size
        :param extraSpecs: additional info
        :returns: volumeDict
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        # Resizing reuses the create/modify method with TheElement set to
        # the existing volume and only a new Size supplied.
        rc, job = conn.InvokeMethod(
            'CreateOrModifyElementFromStoragePool',
            storageConfigService, TheElement=volumeInstanceName,
            Size=self.utils.get_num(volumeSize, '64'))
        LOG.debug("Extend Volume: %(volumename)s. Return code: %(rc)lu.",
                  {'volumename': volumeName,
                   'rc': rc})
        if rc != 0:
            # Non-zero rc: wait for the async job and re-check the
            # definitive return code before raising.
            rc, error_desc = self.utils.wait_for_job_complete(conn, job,
                                                              extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Extend Volume: %(volumeName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'volumeName': volumeName,
                       'rc': rc,
                       'error': error_desc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        # Find the newly created volume.
        volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
        return volumeDict, rc
| |
import calendar
import datetime
from decimal import Decimal
from django.db import transaction
from django.db.models import F, Q, Min, Max, Sum
from django.utils.translation import ugettext as _, ungettext
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.accounting.exceptions import (
InvoiceAlreadyCreatedError,
InvoiceEmailThrottledError,
InvoiceError,
LineItemError,
)
from corehq.apps.accounting.models import (
LineItem, FeatureType, Invoice, DefaultProductPlan, Subscriber,
Subscription, BillingAccount, SubscriptionAdjustment,
SubscriptionAdjustmentMethod, BillingRecord,
SoftwarePlanEdition, CreditLine,
EntryPoint, WireInvoice, WireBillingRecord,
SMALL_INVOICE_THRESHOLD, UNLIMITED_FEATURE_USAGE,
SoftwareProductType
)
from corehq.apps.accounting.utils import (
ensure_domain_instance,
log_accounting_error,
log_accounting_info,
)
from corehq.apps.smsbillables.models import SmsBillable
from corehq.apps.users.models import CommCareUser
DEFAULT_DAYS_UNTIL_DUE = 30
class DomainInvoiceFactory(object):
    """
    This handles all the little details when generating an Invoice.
    """
    def __init__(self, date_start, date_end, domain, recipients=None):
        """
        The Invoice generated will always be for the month preceding the
        invoicing_date.
        For example, if today is July 5, 2014 then the invoice will be from
        June 1, 2014 to June 30, 2014.
        """
        self.date_start = date_start
        self.date_end = date_end
        self.domain = ensure_domain_instance(domain)
        self.recipients = recipients
        self.logged_throttle_error = False
        if self.domain is None:
            raise InvoiceError("Domain '%s' is not a valid domain on HQ!" % domain)
        self.is_community_invoice = False
    def create_invoices(self):
        """Create one invoice per subscription covering the invoice period.

        A pre-existing invoice for a subscription is logged and skipped,
        not treated as fatal.
        """
        subscriptions = self._get_subscriptions()
        self._ensure_full_coverage(subscriptions)
        for subscription in subscriptions:
            try:
                self._create_invoice_for_subscription(subscription)
            except InvoiceAlreadyCreatedError as e:
                log_accounting_error(
                    "Invoice already existed for domain %s: %s" % (self.domain.name, e),
                    show_stack_trace=True,
                )
    def _get_subscriptions(self):
        """Return the subscriber's subscriptions overlapping the period.

        Filters out zero-length subscriptions (date_end <= date_start) and
        ones ending before the period begins.
        """
        subscriptions = Subscription.objects.filter(
            subscriber=self.subscriber, date_start__lte=self.date_end
        ).filter(
            Q(date_end=None) | Q(date_end__gt=self.date_start)
        ).filter(
            Q(date_end=None) | Q(date_end__gt=F('date_start'))
        ).order_by('date_start', 'date_end').all()
        return list(subscriptions)
    @transaction.atomic
    def _ensure_full_coverage(self, subscriptions):
        """Fill gaps in the invoice period with community subscriptions.

        New community subscriptions are appended to *subscriptions*
        in place so the caller invoices them too.
        """
        plan_version = DefaultProductPlan.get_default_plan_version()
        if not plan_version.feature_charges_exist_for_domain(self.domain):
            return
        community_ranges = self._get_community_ranges(subscriptions)
        if not community_ranges:
            return
        # First check to make sure none of the existing subscriptions is set
        # to do not invoice. Let's be on the safe side and not send a
        # community invoice out, if that's the case.
        do_not_invoice = any([s.do_not_invoice for s in subscriptions])
        account = BillingAccount.get_or_create_account_by_domain(
            self.domain.name,
            created_by=self.__class__.__name__,
            entry_point=EntryPoint.SELF_STARTED,
        )[0]
        if account.date_confirmed_extra_charges is None:
            log_accounting_info(
                "Did not generate invoice because date_confirmed_extra_charges "
                "was null for domain %s" % self.domain.name
            )
            do_not_invoice = True
        for start_date, end_date in community_ranges:
            # create a new community subscription for each
            # date range that the domain did not have a subscription
            community_subscription = Subscription(
                account=account,
                plan_version=plan_version,
                subscriber=self.subscriber,
                date_start=start_date,
                date_end=end_date,
                do_not_invoice=do_not_invoice,
            )
            community_subscription.save()
            subscriptions.append(community_subscription)
    def _create_invoice_for_subscription(self, subscription):
        """Generate and (maybe) email the invoice for one subscription.

        Trials and feature-charge-free subscriptions that opted out are
        skipped entirely.
        """
        def _get_invoice_start(sub, date_start):
            # Clamp the invoice start to the subscription start.
            return max(sub.date_start, date_start)
        def _get_invoice_end(sub, date_end):
            if sub.date_end is not None and sub.date_end <= date_end:
                # Since the Subscription is actually terminated on date_end
                # have the invoice period be until the day before date_end.
                return sub.date_end - datetime.timedelta(days=1)
            else:
                return date_end
        if subscription.is_trial:
            # Don't create invoices for trial subscriptions
            log_accounting_info(
                "Skipping invoicing for Subscription %s because it's a trial."
                % subscription.pk
            )
            return
        if (
            subscription.skip_invoicing_if_no_feature_charges
            and not subscription.plan_version.feature_charges_exist_for_domain(self.domain)
        ):
            log_accounting_info(
                "Skipping invoicing for Subscription %s because there are no feature charges."
                % subscription.pk
            )
            return
        invoice_start = _get_invoice_start(subscription, self.date_start)
        invoice_end = _get_invoice_end(subscription, self.date_end)
        with transaction.atomic():
            invoice = self._generate_invoice(subscription, invoice_start, invoice_end)
            record = BillingRecord.generate_record(invoice)
        if record.should_send_email:
            try:
                record.send_email(contact_emails=self.recipients)
            except InvoiceEmailThrottledError as e:
                # Log the throttle only once per factory run.
                if not self.logged_throttle_error:
                    log_accounting_error(e.message)
                    self.logged_throttle_error = True
        else:
            record.skipped_email = True
            record.save()
        return invoice
    def _get_community_ranges(self, subscriptions):
        """Return (start, end) gaps in the period with no subscription.

        End dates are exclusive; the trailing range extends one day past
        self.date_end to cover the final day.
        """
        community_ranges = []
        if len(subscriptions) == 0:
            return [(self.date_start, self.date_end + datetime.timedelta(days=1))]
        else:
            prev_sub_end = self.date_end
            for ind, sub in enumerate(subscriptions):
                if ind == 0 and sub.date_start > self.date_start:
                    # the first subscription started AFTER the beginning
                    # of the invoicing period
                    community_ranges.append((self.date_start, sub.date_start))
                if prev_sub_end < self.date_end and sub.date_start > prev_sub_end:
                    community_ranges.append((prev_sub_end, sub.date_start))
                # NOTE(review): prev_sub_end can become None for an
                # ongoing subscription (date_end=None) — presumably such a
                # subscription is last in the ordering; confirm.
                prev_sub_end = sub.date_end
                if (
                    ind == len(subscriptions) - 1 and
                    sub.date_end is not None and
                    sub.date_end <= self.date_end
                ):
                    # the last subscription ended BEFORE the end of
                    # the invoicing period
                    community_ranges.append(
                        (sub.date_end, self.date_end + datetime.timedelta(days=1))
                    )
            return community_ranges
    def _generate_invoice(self, subscription, invoice_start, invoice_end):
        """Create the Invoice row, its line items, and its due date.

        :raises InvoiceAlreadyCreatedError: if an invoice for this
            subscription and period already exists.
        """
        invoice, is_new_invoice = Invoice.objects.get_or_create(
            subscription=subscription,
            date_start=invoice_start,
            date_end=invoice_end,
            is_hidden=subscription.do_not_invoice,
        )
        if not is_new_invoice:
            raise InvoiceAlreadyCreatedError("invoice id: {id}".format(id=invoice.id))
        if subscription.subscriptionadjustment_set.count() == 0:
            # record that the subscription was created
            SubscriptionAdjustment.record_adjustment(
                subscription,
                method=SubscriptionAdjustmentMethod.TASK,
                invoice=invoice,
            )
        DomainInvoiceFactory._generate_line_items(invoice, subscription)
        invoice.calculate_credit_adjustments()
        invoice.update_balance()
        invoice.save()
        visible_domain_invoices = Invoice.objects.filter(
            is_hidden=False,
            subscription__subscriber__domain=invoice.get_domain(),
        )
        total_balance = sum(invoice.balance for invoice in visible_domain_invoices)
        # Only set a due date when the balance is large enough to chase,
        # or when auto-pay will collect any positive balance.
        should_set_date_due = (
            total_balance > SMALL_INVOICE_THRESHOLD or
            (invoice.account.auto_pay_enabled and total_balance > Decimal(0))
        )
        if should_set_date_due:
            days_until_due = DEFAULT_DAYS_UNTIL_DUE
            if subscription.date_delay_invoicing is not None:
                # Honor a delayed-invoicing date by pushing the due date out.
                td = subscription.date_delay_invoicing - self.date_end
                days_until_due = max(days_until_due, td.days)
            invoice.date_due = self.date_end + datetime.timedelta(days_until_due)
            invoice.save()
        return invoice
    @staticmethod
    def _generate_line_items(invoice, subscription):
        """Create product and feature line items for the invoice."""
        product_rate = subscription.plan_version.product_rate
        product_factory = ProductLineItemFactory(subscription, product_rate, invoice)
        product_factory.create()
        for feature_rate in subscription.plan_version.feature_rates.all():
            feature_factory_class = FeatureLineItemFactory.get_factory_by_feature_type(
                feature_rate.feature.feature_type
            )
            feature_factory = feature_factory_class(subscription, feature_rate, invoice)
            feature_factory.create()
    @property
    def subscriber(self):
        # Lazily get-or-create the Subscriber row for this domain.
        return Subscriber.objects.get_or_create(domain=self.domain.name)[0]
class DomainWireInvoiceFactory(object):
    """Creates wire (pay-by-wire) invoices covering a domain's unpaid balance."""

    def __init__(self, domain, date_start=None, date_end=None, contact_emails=None):
        self.date_start = date_start
        self.date_end = date_end
        self.contact_emails = contact_emails
        self.domain = ensure_domain_instance(domain)
        self.logged_throttle_error = False
        if self.domain is None:
            # Bug fix: format the caller-supplied value, not self.domain —
            # self.domain is always None on this path, so the message
            # previously always read "Domain 'None' ...".
            raise InvoiceError("Domain '{}' is not a valid domain on HQ!".format(domain))

    @transaction.atomic
    def create_wire_invoice(self, balance):
        """Create a WireInvoice for *balance* and email its billing record.

        The invoice period defaults to spanning the domain's unpaid,
        visible invoices when no explicit dates were given.

        :param balance: the amount to bill
        :returns: the created WireInvoice
        """
        # Gather relevant (unpaid, visible) invoices.
        invoices = Invoice.objects.filter(
            subscription__subscriber__domain=self.domain,
            is_hidden=False,
            date_paid__exact=None,
        ).order_by('-date_start')
        account = BillingAccount.get_or_create_account_by_domain(
            self.domain.name,
            created_by=self.__class__.__name__,
            entry_point=EntryPoint.SELF_STARTED,
        )[0]
        # If no start date supplied, default to the earliest start date of
        # the unpaid invoices (may be None when there are none).
        if self.date_start:
            date_start = self.date_start
        else:
            date_start = invoices.aggregate(Min('date_start'))['date_start__min']
        # If no end date supplied, default to the latest end date of the
        # unpaid invoices, falling back to today.
        if self.date_end:
            date_end = self.date_end
        else:
            date_end = invoices.aggregate(Max('date_end'))['date_end__max']
        if not date_end:
            date_end = datetime.datetime.today()
        date_due = date_end + datetime.timedelta(DEFAULT_DAYS_UNTIL_DUE)
        wire_invoice = WireInvoice.objects.create(
            domain=self.domain.name,
            date_start=date_start,
            date_end=date_end,
            date_due=date_due,
            balance=balance,
            account=account
        )
        record = WireBillingRecord.generate_record(wire_invoice)
        if record.should_send_email:
            try:
                record.send_email(contact_emails=self.contact_emails)
            except InvoiceEmailThrottledError as e:
                # Currently wire invoices are never throttled
                if not self.logged_throttle_error:
                    log_accounting_error(e.message)
                    self.logged_throttle_error = True
        else:
            record.skipped_email = True
        record.save()
        return wire_invoice

    def create_wire_credits_invoice(self, items, amount):
        """Queue async creation of a wire credits invoice for this domain."""
        # Imported here to avoid a circular import with the tasks module.
        from corehq.apps.accounting.tasks import create_wire_credits_invoice
        create_wire_credits_invoice.delay(
            domain_name=self.domain.name,
            account_created_by=self.__class__.__name__,
            account_entry_point=EntryPoint.SELF_STARTED,
            amount=amount,
            invoice_items=items,
            contact_emails=self.contact_emails
        )
class LineItemFactory(object):
    """
    This generates a line item based on what type of Feature or Product rate triggers it.
    """

    def __init__(self, subscription, rate, invoice):
        self.subscription = subscription
        self.rate = rate  # a product or feature rate from the plan version
        self.invoice = invoice

    @property
    def unit_description(self):
        """
        If this returns None then the unit unit_description, unit_cost, and quantity
        will not show up for the line item in the printed invoice.
        """
        return None

    @property
    def base_description(self):
        """
        If this returns None then the unit base_description and base_cost
        will not show up for the line item in the printed invoice.
        """
        return None

    @property
    def unit_cost(self):
        """Cost per unit; subclasses must implement."""
        raise NotImplementedError()

    @property
    def quantity(self):
        """Number of units billed; subclasses must implement."""
        raise NotImplementedError()

    @property
    @memoized
    def subscribed_domains(self):
        """Domains covered by the subscription (currently a single domain).

        :raises LineItemError: when the subscriber has no domain.
        """
        if self.subscription.subscriber.domain is None:
            raise LineItemError("No domain could be obtained as the subscriber.")
        return [self.subscription.subscriber.domain]

    def create(self):
        """Instantiate (but do not save) the LineItem; subclasses fill in
        their rate field and call save()."""
        line_item = LineItem(
            invoice=self.invoice,
            base_description=self.base_description,
            unit_description=self.unit_description,
            unit_cost=self.unit_cost,
            quantity=self.quantity,
        )
        return line_item

    @classmethod
    def get_factory_by_feature_type(cls, feature_type):
        """Return the factory subclass handling ``feature_type``.

        :raises LineItemError: for unrecognized feature types.
        """
        try:
            return {
                FeatureType.SMS: SmsLineItemFactory,
                FeatureType.USER: UserLineItemFactory,
            }[feature_type]
        except KeyError:
            # Bugfix: the message previously lacked the closing quote
            # around the feature type.
            raise LineItemError("No line item factory exists for the feature type '%s'" % feature_type)

    @property
    @memoized
    def is_prorated(self):
        """True unless the invoice covers exactly one full calendar month."""
        return not (
            self.invoice.date_end.day == self._days_in_billing_period
            and self.invoice.date_start.day == 1
        )

    @property
    def num_prorated_days(self):
        # Inclusive day count within the invoice's month.
        return self.invoice.date_end.day - self.invoice.date_start.day + 1

    @property
    def _days_in_billing_period(self):
        # Length of the calendar month containing the invoice end date.
        return calendar.monthrange(self.invoice.date_end.year, self.invoice.date_end.month)[1]
class ProductLineItemFactory(LineItemFactory):
    """Builds the software-plan (product) line item, prorating the monthly
    fee when the invoice does not cover a full calendar month."""

    def create(self):
        line_item = super(ProductLineItemFactory, self).create()
        line_item.product_rate = self.rate
        is_full_month = not self.is_prorated
        if is_full_month:
            line_item.base_cost = self.rate.monthly_fee
        line_item.save()
        if self.subscription.auto_generate_credits:
            self._auto_generate_credits(line_item)
        return line_item

    @property
    def base_description(self):
        if self.is_prorated:
            return None
        return _("One month of %(plan_name)s Software Plan.") % {
            'plan_name': self.plan_name,
        }

    @property
    def unit_description(self):
        if not self.is_prorated:
            return None
        return ungettext(
            "%(num_days)s day of %(plan_name)s Software Plan.",
            "%(num_days)s days of %(plan_name)s Software Plan.",
            self.num_prorated_days
        ) % {
            'num_days': self.num_prorated_days,
            'plan_name': self.plan_name,
        }

    @property
    def unit_cost(self):
        if not self.is_prorated:
            return Decimal('0.0')
        # Per-day cost, rounded to cents.
        daily_fee = self.rate.monthly_fee / self._days_in_billing_period
        return Decimal("%.2f" % round(daily_fee, 2))

    @property
    def quantity(self):
        return self.num_prorated_days if self.is_prorated else 1

    @property
    def plan_name(self):
        return self.subscription.plan_version.plan.name

    def _auto_generate_credits(self, line_item):
        # Grant a credit equal to the line item subtotal on this subscription.
        CreditLine.add_credit(
            line_item.subtotal,
            subscription=self.subscription,
            product_type=SoftwareProductType.ANY,
            permit_inactive=True,
        )
class FeatureLineItemFactory(LineItemFactory):
    """Base factory for feature-based (SMS / user) invoice line items."""

    def create(self):
        item = super(FeatureLineItemFactory, self).create()
        item.feature_rate = self.rate
        item.save()
        return item

    @property
    def unit_cost(self):
        # Per-unit charge applied to usage beyond the plan's monthly limit.
        return self.rate.per_excess_fee
class UserLineItemFactory(FeatureLineItemFactory):
    """Line item charging for active users beyond the plan's monthly limit."""

    @property
    def unit_cost(self):
        base_cost = super(UserLineItemFactory, self).unit_cost
        # To ensure that integer division is avoided
        assert isinstance(base_cost, Decimal)
        if not self.is_prorated:
            return base_cost
        prorated = base_cost * self.num_prorated_days / self._days_in_billing_period
        return Decimal("%.2f" % round(prorated, 2))

    @property
    def quantity(self):
        return self.num_excess_users

    @property
    def num_excess_users(self):
        if self.rate.monthly_limit == UNLIMITED_FEATURE_USAGE:
            return 0
        return max(self.num_users - self.rate.monthly_limit, 0)

    @property
    @memoized
    def num_users(self):
        # Active-user count summed over every subscribed domain.
        return sum(
            CommCareUser.total_by_domain(domain, is_active=True)
            for domain in self.subscribed_domains
        )

    @property
    def unit_description(self):
        if self.num_excess_users <= 0:
            return None
        return ungettext(
            "Per User fee exceeding monthly limit of %(monthly_limit)s user.",
            "Per User fee exceeding monthly limit of %(monthly_limit)s users.",
            self.rate.monthly_limit
        ) % {
            'monthly_limit': self.rate.monthly_limit,
        }
class SmsLineItemFactory(FeatureLineItemFactory):
    """Line item for SMS usage, charging only for message parts beyond the
    plan's free monthly limit."""

    @property
    @memoized
    def unit_cost(self):
        """Total charge (gateway + usage) for message parts over the monthly
        limit, rounded to two decimal places.

        Billables are walked in the order produced by ``sms_billables``
        (most recent first, per the queryset ordering); a billable that
        straddles the limit is charged pro-rata on the fraction of its
        parts that exceed the limit.
        """
        total_excess = Decimal('0.0')
        if self.is_within_monthly_limit:
            return total_excess
        sms_count = 0
        for billable in self.sms_billables:
            sms_count += billable.multipart_count
            if sms_count <= self.rate.monthly_limit:
                # don't count fees until the free monthly limit is exceeded
                continue
            else:
                total_message_charge = billable.gateway_charge + billable.usage_charge
                num_parts_over_limit = sms_count - self.rate.monthly_limit
                already_over_limit = num_parts_over_limit >= billable.multipart_count
                if already_over_limit:
                    # All of this billable's parts are over the limit.
                    total_excess += total_message_charge
                else:
                    # Only some parts are over the limit: charge proportionally.
                    total_excess += total_message_charge * num_parts_over_limit / billable.multipart_count
        return Decimal("%.2f" % round(total_excess, 2))

    @property
    @memoized
    def quantity(self):
        # SMS charges appear as a single aggregated line item.
        return 1

    @property
    @memoized
    def unit_description(self):
        """Human-readable summary of SMS usage relative to the plan limit."""
        if self.rate.monthly_limit == UNLIMITED_FEATURE_USAGE:
            return ungettext(
                "%(num_sms)d SMS Message",
                "%(num_sms)d SMS Messages",
                self.num_sms
            ) % {
                'num_sms': self.num_sms,
            }
        elif self.is_within_monthly_limit:
            return _(
                "%(num_sms)d of %(monthly_limit)d included SMS messages"
            ) % {
                'num_sms': self.num_sms,
                'monthly_limit': self.rate.monthly_limit,
            }
        else:
            # Over the limit: report only the excess messages.
            assert self.rate.monthly_limit != UNLIMITED_FEATURE_USAGE
            assert self.rate.monthly_limit < self.num_sms
            num_extra = self.num_sms - self.rate.monthly_limit
            assert num_extra > 0
            return ungettext(
                "%(num_extra_sms)d SMS Message beyond %(monthly_limit)d messages included.",
                "%(num_extra_sms)d SMS Messages beyond %(monthly_limit)d messages included.",
                num_extra
            ) % {
                'num_extra_sms': num_extra,
                'monthly_limit': self.rate.monthly_limit,
            }

    @property
    @memoized
    def sms_billables_queryset(self):
        # Valid billables across all subscribed domains inside the invoice
        # period; date_end is treated as inclusive (hence the one-day offset).
        return SmsBillable.objects.filter(
            domain__in=self.subscribed_domains,
            is_valid=True,
            date_sent__gte=self.invoice.date_start,
            date_sent__lt=self.invoice.date_end + datetime.timedelta(days=1),
        ).order_by('-date_sent')

    @property
    @memoized
    def sms_billables(self):
        # Materialized once; unit_cost iterates this list.
        return list(self.sms_billables_queryset)

    @property
    @memoized
    def num_sms(self):
        # Total message parts in the period (0 when there are none).
        return self.sms_billables_queryset.aggregate(Sum('multipart_count'))['multipart_count__sum'] or 0

    @property
    @memoized
    def is_within_monthly_limit(self):
        if self.rate.monthly_limit == UNLIMITED_FEATURE_USAGE:
            return True
        else:
            return self.num_sms <= self.rate.monthly_limit
| |
# django imports
from django.conf import settings
from django.template import Context, Template
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify

# SciPy Central imports
from scipy_central.filestorage.models import FileSet
from scipy_central.utils import ensuredir
from scipy_central.submission import models

# python imports
import logging
import zipfile
import datetime
import shutil
import os

# Module-level logger shared by the storage helpers below.
logger = logging.getLogger('scipycentral')
logger.debug('Initializing submission::storage.py')
def get_repo_path(rev_object):
    """
    Creates a new repo path based on `pk` value of `rev_object`
    (YYYY/MM/<zero-padded pk>). If the path name already exists under the
    storage directory, a `_count` suffix is appended until it is unique.
    """
    now = datetime.datetime.now()
    base_path = os.path.join(now.strftime('%Y'), now.strftime('%m'),
                             '%06d' % rev_object.pk)
    # Probe for an unused name: base, base_1, base_2, ...
    candidate = base_path
    suffix = 1
    while os.path.exists(os.path.join(settings.SPC['storage_dir'], candidate)):
        candidate = base_path + '_%d' % suffix
        suffix += 1
    return candidate
def copy_package_file(package_file, dst):
    """
    Copy an uploaded package to disk, chunk by chunk.

    `package_file` is an in-memory file object created by django
    `dst` is destination file path.
    If object size > 2MB (default limit), it is first stored
    in a temporary directory by django

    Uses a context manager so the destination handle is closed even when
    reading a chunk raises (the previous version leaked it on error).
    """
    with open(dst, 'wb+') as destination:
        for chunk in package_file.chunks():
            destination.write(chunk)
class SubmissionStorage(object):
    """
    Class used to handle storage of submissions in the file system.
    It is assumed that the application already established a connection
    with the storage directory (or disk) and has all necessary permissions.
    The storage directory is recommended to be handled only through this class.

    NOTE(review): this module uses Python 2 syntax (``except E, e``,
    ``file()``, ``unicode``) and will not run on Python 3 as-is.
    """

    def store(self):
        """
        Wrapper method to handle operations.
        If is_new is True, creates a new repository or else commits new
        changes in the existing repository.
        Returns the commit hash id of the fileset.
        """
        if self.is_new:
            return self.__create_new_submission()
        else:
            return self.__create_revision()

    def __create_revision(self):
        """
        Creates a revision to an existing submission.
        """
        # get or create repo
        repo = self.object.entry.fileset.get_repo()
        # commit revision
        if self.object.entry.sub_type == 'snippet':
            snippet_name = self.object.slug.replace('-', '_') + '.py'
            # NOTE(review): the adjacent string literals concatenate without
            # a space ("...by" + "user"), producing "byuser" in the commit
            # message -- looks like a latent typo; confirm before fixing.
            commit_msg = ('Update of file(s) in the repo based on the web submission by'
                          'user "%s"' % (self.object.created_by.username))
            return self.__commit_snippet(snippet_name, self.object.item_code, commit_msg)
        elif self.object.entry.sub_type == 'package':
            # explicit validation is required as revision object does not have
            # `package_file` attribute and is only passed for easy usage instead of passing
            # `form` object!
            if not hasattr(self.object, 'package_file'):
                raise AttributeError('Uploaded file not passed to revision object')
            repo_path = self.object.entry.fileset.repo_path
            full_repo_path = os.path.join(settings.SPC['storage_dir'], repo_path)
            # if not new - has an existing repo from previous revision;
            # delete all data except the revision backend dir (.hg / .git / ...)
            revision_backend_dir = '.' + settings.SPC['revisioning_backend']
            for path, dirs, files in os.walk(full_repo_path):
                # ignore dir (pruning it from `dirs` stops os.walk descending)
                if revision_backend_dir in dirs:
                    dirs.remove(revision_backend_dir)
                # remove files in path
                for name in files:
                    os.remove(os.path.join(path, name))
                # remove dirs in path
                for name in dirs:
                    shutil.rmtree(os.path.join(path, name))
            # remove deleted files from repo
            repo.remove(['--after'])
            commit_msg = 'Update files from web-uploaded ZIP file, DESCRIPTION.txt'
            return self.__commit_package(self.object.package_file, commit_msg)
        else:
            raise TypeError('Unknown submission type')

    def __create_new_submission(self):
        """
        A new submission is created: empty repo first, then initial commit.
        """
        self.__create_repo()
        if self.object.entry.sub_type == 'snippet':
            snippet_name = slugify(self.object.title).replace('-', '_') + '.py'
            commit_msg = ('Add "%s" to the repo based on the web submission by user "%s"'
                          % (snippet_name, self.object.created_by.username))
            return self.__commit_snippet(snippet_name, self.object.item_code, commit_msg)
        elif self.object.entry.sub_type == 'package':
            if not hasattr(self.object, 'package_file'):
                raise AttributeError('Upload file not passed to revision object')
            commit_msg = 'Add files from web-uploaded ZIP file, DESCRIPTION.txt'
            return self.__commit_package(self.object.package_file, commit_msg)
        else:
            raise TypeError('Unknown submission type')

    def __commit_package(self, package_file, commit_msg):
        """
        Adds files in `package_file` to the repository.
        1. The `package_file` object is copied to the repo path
        2. All files except repo dirs (.hg, .git, .svn etc) are extracted
        3. DESCRIPTION.txt, LICENSE.txt files are added
        4. Changes committed to repo
        raises DVCSError if new files contain no changes from the existing
        ones
        """
        repo_path = self.object.entry.fileset.repo_path
        full_repo_path = os.path.join(settings.SPC['storage_dir'], repo_path)
        # copy package file to repo
        dst = os.path.join(full_repo_path, package_file.name)
        copy_package_file(package_file, dst)
        # unzip package file
        zip_obj = zipfile.ZipFile(dst, 'r')
        for file_name in zip_obj.namelist():
            # ignore revision backend dirs if present;
            # zipfile.ZipFile object ensures separators are '/'
            base_dir = file_name.split('/')[0]
            if not base_dir in settings.SPC['common_rcs_dirs']:
                zip_obj.extract(zip_obj.getinfo(file_name), full_repo_path)
        zip_obj.close()
        # delete zip file
        os.remove(dst)
        # commit package files
        repo_obj = self.object.entry.fileset.get_repo()
        for path, dirs, files in os.walk(full_repo_path):
            # exclude revision backend dir (clear `dirs` so walk won't descend)
            if os.path.split(path)[1] == '.' + settings.SPC['revisioning_backend']:
                for entry in dirs[:]:
                    dirs.remove(entry)
                continue
            all_files = []
            for name in files:
                all_files.append(os.path.join(path, name))
            if all_files:
                repo_obj.add(patterns=all_files, ignore_errors=True)
        # add `description.txt` file
        description_name = os.path.join(full_repo_path, 'DESCRIPTION.txt')
        # NOTE: `file()` is the Python 2 builtin; this module targets Python 2.
        description_file = file(description_name, 'wb')
        description_file.write(self.object.description)
        description_file.close()
        self.object.entry.fileset.add_file(description_name,
                                           user=self.object.created_by.get_absolute_url(),
                                           commit_msg=commit_msg)
        # add license file
        license_text = self.__get_license_text()
        self.object.entry.fileset.add_file_from_string(settings.SPC['license_filename'],
                                                       license_text, user='SciPy Central',
                                                       commit_msg='SPC: add/update license file')
        # log info
        logger.info('SubmissionStorage:: Commit package to the repo: '
                    'Repo [dir=%s] Revision [pk=%d] User [pk=%d]'
                    % (repo_path, self.object.pk, self.object.created_by.pk))
        # hash id
        return self.object.entry.fileset.get_hash()

    def __commit_snippet(self, snippet_name, snippet_text, commit_msg):
        """
        Add snippet text to the repository.
        1. Create a file named `snippet_name` with `snippet_text` in it
        2. Add LICENSE.txt to the repo
        3. Commit changes to the repo
        Returns the resulting commit hash id.
        """
        self.object.entry.fileset.add_file_from_string(snippet_name, snippet_text,
                                                       user=self.object.created_by.get_absolute_url(),
                                                       commit_msg=commit_msg)
        # commit license file
        license_text = self.__get_license_text()
        self.object.entry.fileset.add_file_from_string(settings.SPC['license_filename'],
                                                       license_text, user='SciPy Central',
                                                       commit_msg='SPC: add/update license file')
        # log info
        logger.info('SubmissionStorage:: Commit snippet to the repo: '
                    'Repo [dir=%s] Revision [pk=%d] User [pk=%d]'
                    % (self.object.entry.fileset.repo_path, self.object.pk,
                       self.object.created_by.pk))
        # hash id
        return self.object.entry.fileset.get_hash()

    def __create_repo(self):
        """
        Create an empty repository for the fileset object.
        Raises TypeError if the entry's fileset is not a FileSet instance.
        """
        if not isinstance(self.object.entry.fileset, FileSet):
            raise TypeError('FileSet object not passed to fileset attribute')
        repo_path = self.object.entry.fileset.repo_path
        ensuredir(os.path.join(settings.SPC['storage_dir'], repo_path))
        repo = self.object.entry.fileset.create_empty()
        # log info
        logger.info('SubmissionStorage:: Created an empty repository: '
                    'Path [dir=%s] Revision [pk=%d] User [pk=%d]'
                    % (repo_path, self.object.pk, self.object.created_by.pk))
        return repo

    def revert(self, hash_id=None):
        """
        Revert changes made. Returns `True` if reverted,
        `False` if not due to any errors.

        For a new submission the whole repo directory is deleted; otherwise
        untracked changes are purged and the repo is updated to `hash_id`.

        NOTE(review): the success paths below log via ``logger.error`` --
        presumably intentional for visibility during failure handling, but
        worth confirming. Also, ``WindowsError`` is only a builtin on
        Windows; on other platforms evaluating the except tuple would raise
        NameError -- verify against deployment targets.
        """
        fileset = self.object.entry.fileset
        is_reverted = True
        if self.is_new:
            full_repo_path = os.path.join(settings.SPC['storage_dir'], fileset.repo_path)
            # Delete repo path if exists
            if os.path.exists(full_repo_path):
                try:
                    shutil.rmtree(full_repo_path)
                    # log the operation
                    logger.error('SubmissionStorage:: Removed created repo on error')
                except (WindowsError, OSError, IOError), e:
                    # log the error
                    logger.critical('SubmissionStorage:: Unable to remove repo on error %s' % e)
                    is_reverted = False
        else:
            if not isinstance(hash_id, (str, unicode)):
                raise TypeError('hash_id argument should be str if `is_new` is False')
            repo = fileset.get_repo()
            try:
                # remove all untracked changes
                # Note: need to turn on `purge` extension in hg
                repo.purge()
                # go back to previous commit
                repo.update(['-r%s' % hash_id])
                # log the operation
                logger.error('SubmissionStorage:: Revert repo changes on error')
            except RuntimeError, e:
                # log the error
                logger.critical('SubmissionStorage:: Unable to revert changes in repo %s' % e)
                is_reverted = False
        return is_reverted

    def __get_license_text(self):
        """
        Generates and returns the license text for the given revision. Uses the
        revision and authorship information from previous revisions, if necessary,
        to create the license.
        Raises NotImplementedError for license slugs other than 'cc0' and 'bsd'.
        """
        slug = self.object.sub_license.slug
        if slug == 'cc0':
            license_cc0 = Template(self.object.sub_license.text_template).render(Context())
            sub_license = render_to_string('submission/license-cc0.txt',
                                           {'obj': self.object, 'license_cc0': license_cc0,
                                            'url_domain': settings.SPC['short_URL_root']})
            return sub_license
        elif slug == 'bsd':
            license_bsd = Template(self.object.sub_license.text_template).render(
                Context({
                    'year': datetime.datetime.now().year,
                    'copyright_holder': settings.SPC['short_URL_root'] + \
                        self.object.created_by.get_absolute_url()
                }))
            sub_license = render_to_string('submission/license-bsd.txt',
                                           {'obj': self.object, 'license_bsd': license_bsd,
                                            'url_domain': settings.SPC['short_URL_root']})
            return sub_license
        else:
            raise NotImplementedError('%s license is not yet implemented' % slug)

    def __init__(self, revision, is_new):
        """
        self.object is an instance of a `Revision` object
        self.is_new is `True` for a new submission or `False` if not
        """
        self.object = revision
        self.is_new = is_new

    def __setattr__(self, key, value):
        # Type-validate the two attributes assigned in __init__ on every set.
        if key == 'object':
            if not isinstance(value, models.Revision):
                raise TypeError('Revision object instance has to be passed')
        if key == 'is_new':
            if not isinstance(value, bool):
                raise TypeError('is_new object only accepts boolean values')
        super(SubmissionStorage, self).__setattr__(key, value)
| |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.base import app_manager
from ryu.controller import event
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib import addrconv
from ryu.lib.dpid import dpid_to_str
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import slow
# NOTE(review): not referenced within this module -- presumably used by
# consumers registering for this library's events; verify before removing.
LAG_EV_DISPATCHER = "lacplib"
class EventPacketIn(event.EventBase):
    """A PacketIn event re-dispatched for every received packet that is
    not LACP (LacpLib consumes the raw OFPPacketIn itself)."""

    def __init__(self, msg):
        """Store the original OFPPacketIn message for observers."""
        super(EventPacketIn, self).__init__()
        # the untouched OFPPacketIn message
        self.msg = msg
class EventSlaveStateChanged(event.EventBase):
    """An event class that notifies observers when the status of a
    slave i/f changes (up or down)."""

    def __init__(self, datapath, port, enabled):
        """Record which port on which datapath changed, and its new state."""
        super(EventSlaveStateChanged, self).__init__()
        # the switch whose slave i/f changed
        self.datapath = datapath
        # port number of the slave i/f
        self.port = port
        # True when the slave came up, False when it timed out / went down
        self.enabled = enabled
class LacpLib(app_manager.RyuApp):
    """LACP exchange library. this works only in a PASSIVE mode."""

    #-------------------------------------------------------------------
    # PUBLIC METHODS
    #-------------------------------------------------------------------
    def __init__(self):
        """initialization."""
        super(LacpLib, self).__init__()
        self.name = 'lacplib'
        # list of {dpid: {port: {'enabled': bool, 'timeout': int}}} dicts,
        # one entry per bond registered via add().
        self._bonds = []
        # dispatch table: OpenFlow version -> flow-entry installer
        # (v1.2 and v1.3 share the same installer).
        self._add_flow = {
            ofproto_v1_0.OFP_VERSION: self._add_flow_v1_0,
            ofproto_v1_2.OFP_VERSION: self._add_flow_v1_2,
            ofproto_v1_3.OFP_VERSION: self._add_flow_v1_2,
        }
        self._set_logger()

    def add(self, dpid, ports):
        """add a setting of a bonding i/f.
        'add' method takes the corresponding args in this order.

        ========= =====================================================
        Attribute Description
        ========= =====================================================
        dpid      datapath id.
        ports     a list of integer values that means the ports face
                  with the slave i/fs.
        ========= =====================================================

        if you want to use multi LAG, call 'add' method more than once.
        """
        assert isinstance(ports, list)
        # a bond needs at least two slave ports
        assert 2 <= len(ports)
        ifs = {}
        for port in ports:
            # every slave starts disabled with no LACP timeout learned yet
            ifs[port] = {'enabled': False, 'timeout': 0}
        bond = {}
        bond[dpid] = ifs
        self._bonds.append(bond)

    #-------------------------------------------------------------------
    # PUBLIC METHODS ( EVENT HANDLERS )
    #-------------------------------------------------------------------
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, evt):
        """PacketIn event handler. when the received packet was LACP,
        proceed it. otherwise, send a event."""
        req_pkt = packet.Packet(evt.msg.data)
        if slow.lacp in req_pkt:
            (req_lacp, ) = req_pkt.get_protocols(slow.lacp)
            (req_eth, ) = req_pkt.get_protocols(ethernet.ethernet)
            self._do_lacp(req_lacp, req_eth.src, evt.msg)
        else:
            # non-LACP traffic is forwarded to observers untouched
            self.send_event_to_observers(EventPacketIn(evt.msg))

    @set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
    def flow_removed_handler(self, evt):
        """FlowRemoved event handler. when the removed flow entry was
        for LACP, set the status of the slave i/f to disabled, and
        send a event."""
        msg = evt.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        dpid = datapath.id
        match = msg.match
        # match field access differs between OF1.0 and OF1.2+
        if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            port = match.in_port
            dl_type = match.dl_type
        else:
            port = match['in_port']
            dl_type = match['eth_type']
        # only flows matching the Slow Protocols ethertype are ours
        if ether.ETH_TYPE_SLOW != dl_type:
            return
        self.logger.info(
            "SW=%s PORT=%d LACP exchange timeout has occurred.",
            dpid_to_str(dpid), port)
        self._set_slave_enabled(dpid, port, False)
        self._set_slave_timeout(dpid, port, 0)
        self.send_event_to_observers(
            EventSlaveStateChanged(datapath, port, False))

    #-------------------------------------------------------------------
    # PRIVATE METHODS ( RELATED TO LACP )
    #-------------------------------------------------------------------
    def _do_lacp(self, req_lacp, src, msg):
        """packet-in process when the received packet is LACP."""
        datapath = msg.datapath
        dpid = datapath.id
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            port = msg.in_port
        else:
            port = msg.match['in_port']
        self.logger.info("SW=%s PORT=%d LACP received.",
                         dpid_to_str(dpid), port)
        self.logger.debug(str(req_lacp))
        # when LACP arrived at disabled port, update the status of
        # the slave i/f to enabled, and send a event.
        if not self._get_slave_enabled(dpid, port):
            self.logger.info(
                "SW=%s PORT=%d the slave i/f has just been up.",
                dpid_to_str(dpid), port)
            self._set_slave_enabled(dpid, port, True)
            self.send_event_to_observers(
                EventSlaveStateChanged(datapath, port, True))
        # set the idle_timeout time using the actor state of the
        # received packet.
        if req_lacp.LACP_STATE_SHORT_TIMEOUT == \
                req_lacp.actor_state_timeout:
            idle_timeout = req_lacp.SHORT_TIMEOUT_TIME
        else:
            idle_timeout = req_lacp.LONG_TIMEOUT_TIME
        # when the timeout time has changed, update the timeout time of
        # the slave i/f and re-enter a flow entry for the packet from
        # the slave i/f with idle_timeout.
        if idle_timeout != self._get_slave_timeout(dpid, port):
            self.logger.info(
                "SW=%s PORT=%d the timeout time has changed.",
                dpid_to_str(dpid), port)
            self._set_slave_timeout(dpid, port, idle_timeout)
            # pick the flow installer for this switch's OpenFlow version
            func = self._add_flow.get(ofproto.OFP_VERSION)
            assert func
            func(src, port, idle_timeout, datapath)
        # create a response packet.
        res_pkt = self._create_response(datapath, port, req_lacp)
        # packet-out the response packet.
        out_port = ofproto.OFPP_IN_PORT
        actions = [parser.OFPActionOutput(out_port)]
        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=ofproto.OFP_NO_BUFFER,
            data=res_pkt.data, in_port=port, actions=actions)
        datapath.send_msg(out)

    def _create_response(self, datapath, port, req):
        """create a packet including LACP."""
        src = datapath.ports[port].hw_addr
        res_ether = ethernet.ethernet(
            slow.SLOW_PROTOCOL_MULTICAST, src, ether.ETH_TYPE_SLOW)
        res_lacp = self._create_lacp(datapath, port, req)
        res_pkt = packet.Packet()
        res_pkt.add_protocol(res_ether)
        res_pkt.add_protocol(res_lacp)
        res_pkt.serialize()
        return res_pkt

    def _create_lacp(self, datapath, port, req):
        """create a LACP packet: we answer as a passive actor and echo the
        requester's actor fields back as our partner fields."""
        actor_system = datapath.ports[datapath.ofproto.OFPP_LOCAL].hw_addr
        res = slow.lacp(
            actor_system_priority=0xffff,
            actor_system=actor_system,
            actor_key=req.actor_key,
            actor_port_priority=0xff,
            actor_port=port,
            actor_state_activity=req.LACP_STATE_PASSIVE,
            actor_state_timeout=req.actor_state_timeout,
            actor_state_aggregation=req.actor_state_aggregation,
            actor_state_synchronization=req.actor_state_synchronization,
            actor_state_collecting=req.actor_state_collecting,
            actor_state_distributing=req.actor_state_distributing,
            actor_state_defaulted=req.LACP_STATE_OPERATIONAL_PARTNER,
            actor_state_expired=req.LACP_STATE_NOT_EXPIRED,
            partner_system_priority=req.actor_system_priority,
            partner_system=req.actor_system,
            partner_key=req.actor_key,
            partner_port_priority=req.actor_port_priority,
            partner_port=req.actor_port,
            partner_state_activity=req.actor_state_activity,
            partner_state_timeout=req.actor_state_timeout,
            partner_state_aggregation=req.actor_state_aggregation,
            partner_state_synchronization=req.actor_state_synchronization,
            partner_state_collecting=req.actor_state_collecting,
            partner_state_distributing=req.actor_state_distributing,
            partner_state_defaulted=req.actor_state_defaulted,
            partner_state_expired=req.actor_state_expired,
            collector_max_delay=0)
        self.logger.info("SW=%s PORT=%d LACP sent.",
                         dpid_to_str(datapath.id), port)
        self.logger.debug(str(res))
        return res

    def _get_slave_enabled(self, dpid, port):
        """get whether a slave i/f at some port of some datapath is
        enable or not."""
        slave = self._get_slave(dpid, port)
        if slave:
            return slave['enabled']
        else:
            # unknown port/datapath: treat as disabled
            return False

    def _set_slave_enabled(self, dpid, port, enabled):
        """set whether a slave i/f at some port of some datapath is
        enable or not."""
        slave = self._get_slave(dpid, port)
        if slave:
            slave['enabled'] = enabled

    def _get_slave_timeout(self, dpid, port):
        """get the timeout time at some port of some datapath."""
        slave = self._get_slave(dpid, port)
        if slave:
            return slave['timeout']
        else:
            return 0

    def _set_slave_timeout(self, dpid, port, timeout):
        """set the timeout time at some port of some datapath."""
        slave = self._get_slave(dpid, port)
        if slave:
            slave['timeout'] = timeout

    def _get_slave(self, dpid, port):
        """get slave i/f at some port of some datapath; None when the
        port is not part of any registered bond."""
        result = None
        for bond in self._bonds:
            if dpid in bond:
                if port in bond[dpid]:
                    result = bond[dpid][port]
                    break
        return result

    #-------------------------------------------------------------------
    # PRIVATE METHODS ( RELATED TO OPEN FLOW PROTOCOL )
    #-------------------------------------------------------------------
    def _add_flow_v1_0(self, src, port, timeout, datapath):
        """enter a flow entry for the packet from the slave i/f
        with idle_timeout. for OpenFlow ver1.0."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch(
            in_port=port, dl_src=addrconv.mac.text_to_bin(src),
            dl_type=ether.ETH_TYPE_SLOW)
        actions = [parser.OFPActionOutput(
            ofproto.OFPP_CONTROLLER, 65535)]
        mod = parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=timeout,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)

    def _add_flow_v1_2(self, src, port, timeout, datapath):
        """enter a flow entry for the packet from the slave i/f
        with idle_timeout. for OpenFlow ver1.2 and ver1.3."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch(
            in_port=port, eth_src=src, eth_type=ether.ETH_TYPE_SLOW)
        actions = [parser.OFPActionOutput(
            ofproto.OFPP_CONTROLLER, ofproto.OFPCML_MAX)]
        inst = [parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)]
        mod = parser.OFPFlowMod(
            datapath=datapath, command=ofproto.OFPFC_ADD,
            idle_timeout=timeout, flags=ofproto.OFPFF_SEND_FLOW_REM,
            match=match, instructions=inst)
        datapath.send_msg(mod)

    #-------------------------------------------------------------------
    # PRIVATE METHODS ( OTHERS )
    #-------------------------------------------------------------------
    def _set_logger(self):
        """change log format."""
        # keep this app's log lines out of the root logger
        self.logger.propagate = False
        hdl = logging.StreamHandler()
        fmt_str = '[LACP][%(levelname)s] %(message)s'
        hdl.setFormatter(logging.Formatter(fmt_str))
        self.logger.addHandler(hdl)
| |
from __future__ import absolute_import, unicode_literals
import environ
from django.utils.translation import ugettext_lazy as _
from os.path import basename
# environ.Path(__file__) - N strips N trailing components from this file's
# path, yielding progressively higher directories.
SETTINGS_DIR = environ.Path(__file__) - 1
DJANGO_ROOT = environ.Path(__file__) - 2
PROJECT_ROOT = environ.Path(__file__) - 3
# The project name is the directory that contains the settings package.
PROJECT_NAME = basename(str(DJANGO_ROOT))
APPS_DIR = PROJECT_ROOT.path('apps')
PROJECT_TEMPLATES = [
    str(PROJECT_ROOT.path('templates')),
]
# Reader for environment variables (django-environ).
env = environ.Env()
# Django SECRET_KEY is persisted on disk; on first run a random key is
# generated and written to SECRET_FILE. Context managers ensure the file
# handle is closed in all cases (the previous version leaked the read handle).
SECRET_FILE = str(PROJECT_ROOT.path('security/SECRET.key'))
try:
    with open(SECRET_FILE) as secret_file:
        SECRET_KEY = secret_file.read().strip()
except IOError:
    try:
        from django.utils.crypto import get_random_string
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!$%&()=+-_'
        SECRET_KEY = get_random_string(50, chars)
        with open(SECRET_FILE, 'w') as f:
            f.write(SECRET_KEY)
    except IOError:
        raise Exception('Could not open %s for writing!' % SECRET_FILE)
# Debug is off unless explicitly enabled through the DEBUG env variable.
DEBUG = env.bool('DEBUG', False)
ALLOWED_HOSTS = []
# Recipients of error notifications when DEBUG is False.
ADMINS = (
    ('jonathan', 'jony327@gmail.com'),
)
MANAGERS = ADMINS
# Installed apps are grouped by origin and concatenated below.
DJANGO_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
    'django_extensions',
    'rest_framework',
    'django_sites',
)
LOCAL_APPS = (
    'core',
    'apps.menu',
    'apps.customer',
    'apps.taxonomy',
    'apps.container',
    'apps.dashboard',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name;
# newer Django versions expect MIDDLEWARE — confirm the Django version in use.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'core.middleware.thread_user.CuserMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Module paths are derived from the project package name computed above.
WSGI_APPLICATION = '%s.wsgi.application' % PROJECT_NAME
ROOT_URLCONF = '%s.urls' % PROJECT_NAME
# Template engine configuration: filesystem dirs first, then app dirs.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': PROJECT_TEMPLATES,
        'OPTIONS': {
            'debug': DEBUG,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
        },
    },
]
# Password strength checks applied on user creation / password change.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization: UI offered in Spanish and English.
LANGUAGES = (
    ('es', _('Spanish')),
    ('en', _('English')),
)
LANGUAGE_CODE = 'en-us'
LOCALE_PATHS = (
    str(PROJECT_ROOT.path('locale')),
)
TIME_ZONE = 'America/Lima'
USE_I18N = True
USE_L10N = True
# Naive datetimes are stored/used (no timezone conversion).
USE_TZ = False
# Static and media files are collected/served under run/.
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = str(PROJECT_ROOT.path('run/static'))
MEDIA_ROOT = str(PROJECT_ROOT.path('run/media'))
STATICFILES_DIRS = [
    str(PROJECT_ROOT.path('static')),
]
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# The project swaps Django's user model for apps.customer's User.
AUTH_USER_MODEL = 'customer.User'
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)
# Logging: django.* records are swallowed by the null handler except
# request errors, which go to the console and are mailed to ADMINS
# (mail only when DEBUG is False, per the require_debug_false filter).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'complete': {
            'format': '%(levelname)s:%(asctime)s:%(module)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s:%(asctime)s: %(message)s'
        },
        'null': {
            'format': '%(message)s',
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django': {
            'handlers': ['null'],
            'propagate': True,
            'level': 'INFO',
        },
        'django.request': {
            'handlers': ['mail_admins', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Hardening flags.  CSRF cookie stays readable from JS (likely for AJAX
# clients -- confirm before tightening).
CSRF_COOKIE_HTTPONLY = False
SESSION_COOKIE_HTTPONLY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
| |
"""
.. module:: layer
:platform: Windows, Linux
   :synopsis: Class that contains feature service layer information.
.. moduleauthor:: Esri
"""
import types
import common
import filters
import featureservice
from base import BaseAGOLClass
import os
import json
import math
import urlparse
import mimetypes
import uuid
########################################################################
class FeatureLayer(BaseAGOLClass):
"""
This contains information about a feature service's layer.
"""
_objectIdField = None
_allowGeometryUpdates = None
_globalIdField = None
_token_url = None
_currentVersion = None
_id = None
_name = None
_type = None
_description = None
_definitionExpression = None
_geometryType = None
_hasZ = None
_hasM = None
_copyrightText = None
_parentLayer = None
_subLayers = None
_minScale = None
_maxScale = None
_effectiveMinScale = None
_effectiveMaxScale = None
_defaultVisibility = None
_extent = None
_timeInfo = None
_drawingInfo = None
_hasAttachments = None
_htmlPopupType = None
_displayField = None
_typeIdField = None
_fields = None
_types = None # sub-types
_relationships = None
_maxRecordCount = None
_canModifyLayer = None
_supportsStatistics = None
_supportsAdvancedQueries = None
_hasLabels = None
_canScaleSymbols = None
_capabilities = None
_supportedQueryFormats = None
_isDataVersioned = None
_ownershipBasedAccessControlForFeatures = None
_useStandardizedQueries = None
_templates = None
_indexes = None
_hasStaticData = None
_supportsRollbackOnFailureParameter = None
_advancedQueryCapabilities = None
_editingInfo = None
_proxy_url = None
_proxy_port = None
_supportsCalculate = None
_supportsAttachmentsByUploadId = None
#----------------------------------------------------------------------
def __init__(self, url,
username=None,
password=None,
token_url=None,
initialize=False,
proxy_url=None,
proxy_port=None):
"""Constructor"""
self._url = url
self._token_url = token_url
self._username = username
self._password = password
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if not username is None and \
not password is None and \
not username is "" and \
not password is "":
if not token_url is None:
res = self.generate_token(tokenURL=token_url,
proxy_port=proxy_port,
proxy_url=proxy_url)
else:
res = self.generate_token(proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if res is None:
print "Token was not generated"
elif 'error' in res:
print res
else:
self._token = res[0]
if initialize:
self.__init()
#----------------------------------------------------------------------
    def __init(self):
        """ initializes the service: fetches the layer's JSON
            description and copies every recognized key into its
            "_"-prefixed backing attribute, then resolves the parent
            feature service.
        """
        params = {
            "f" : "json",
        }
        if self._token is not None:
            params['token'] = self._token
        # Pull the layer description from the REST endpoint.
        json_dict = self._do_get(self._url, params,
                                 proxy_port=self._proxy_port,
                                 proxy_url=self._proxy_url)
        # The public attribute names double as the set of JSON keys we
        # know how to store; unknown keys are reported, not stored.
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_')]
        for k,v in json_dict.iteritems():
            if k in attributes:
                setattr(self, "_"+ k, json_dict[k])
            else:
                print k, " - attribute not implmented in Feature Layer."
        # The owning feature service lives one URL segment up.
        self._parentLayer = featureservice.FeatureService(
            url=os.path.dirname(self._url),
            token_url=self._token_url,
            username=self._username,
            password=self._password)
#----------------------------------------------------------------------
def __str__(self):
""" returns object as string """
return json.dumps(dict(self), default=common._date_handler)
#----------------------------------------------------------------------
def __iter__(self):
""" iterator generator for public values/properties
It only returns the properties that are public.
"""
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_') and \
not isinstance(getattr(self, attr), (types.MethodType,
types.BuiltinFunctionType,
types.BuiltinMethodType))
]
for att in attributes:
yield (att, getattr(self, att))
#----------------------------------------------------------------------
    # ------------------------------------------------------------------
    # Lazily-loaded service metadata.  Each property returns its cached
    # backing attribute, triggering one REST fetch (via __init) the
    # first time it is read while the backing value is still None.
    # ------------------------------------------------------------------
    @property
    def supportsAttachmentsByUploadId(self):
        """ returns the supports attachments by upload id """
        if self._supportsAttachmentsByUploadId is None:
            self.__init()
        return self._supportsAttachmentsByUploadId
    #----------------------------------------------------------------------
    @property
    def supportsCalculate(self):
        """ returns the supports calculate value """
        if self._supportsCalculate is None:
            self.__init()
        return self._supportsCalculate
    #----------------------------------------------------------------------
    @property
    def editingInfo(self):
        """ returns the edit information """
        if self._editingInfo is None:
            self.__init()
        return self._editingInfo
    #----------------------------------------------------------------------
    @property
    def advancedQueryCapabilities(self):
        """ returns the advanced query capabilities """
        if self._advancedQueryCapabilities is None:
            self.__init()
        return self._advancedQueryCapabilities
    #----------------------------------------------------------------------
    @property
    def supportsRollbackOnFailureParameter(self):
        """ returns if rollback on failure supported """
        if self._supportsRollbackOnFailureParameter is None:
            self.__init()
        return self._supportsRollbackOnFailureParameter
    #----------------------------------------------------------------------
    @property
    def hasStaticData(self):
        """boolean T/F if static data is present """
        if self._hasStaticData is None:
            self.__init()
        return self._hasStaticData
    #----------------------------------------------------------------------
    @property
    def indexes(self):
        """gets the indexes"""
        if self._indexes is None:
            self.__init()
        return self._indexes
    #----------------------------------------------------------------------
    @property
    def templates(self):
        """ gets the template """
        if self._templates is None:
            self.__init()
        return self._templates
    #----------------------------------------------------------------------
    @property
    def allowGeometryUpdates(self):
        """ returns boolean if geometry updates are allowed """
        if self._allowGeometryUpdates is None:
            self.__init()
        return self._allowGeometryUpdates
    #----------------------------------------------------------------------
    @property
    def globalIdField(self):
        """ returns the global id field """
        if self._globalIdField is None:
            self.__init()
        return self._globalIdField
    #----------------------------------------------------------------------
    @property
    def objectIdField(self):
        """ returns the object id field """
        if self._objectIdField is None:
            self.__init()
        return self._objectIdField
    #----------------------------------------------------------------------
    @property
    def currentVersion(self):
        """ returns the current version """
        if self._currentVersion is None:
            self.__init()
        return self._currentVersion
    #----------------------------------------------------------------------
    @property
    def id(self):
        """ returns the id """
        if self._id is None:
            self.__init()
        return self._id
    #----------------------------------------------------------------------
    @property
    def name(self):
        """ returns the name """
        if self._name is None:
            self.__init()
        return self._name
    #----------------------------------------------------------------------
    @property
    def type(self):
        """ returns the type """
        if self._type is None:
            self.__init()
        return self._type
    #----------------------------------------------------------------------
    @property
    def description(self):
        """ returns the layer's description """
        if self._description is None:
            self.__init()
        return self._description
    #----------------------------------------------------------------------
    @property
    def definitionExpression(self):
        """returns the definitionExpression"""
        if self._definitionExpression is None:
            self.__init()
        return self._definitionExpression
    #----------------------------------------------------------------------
    @property
    def geometryType(self):
        """returns the geometry type"""
        if self._geometryType is None:
            self.__init()
        return self._geometryType
    #----------------------------------------------------------------------
    @property
    def hasZ(self):
        """ returns if it has a Z value or not """
        if self._hasZ is None:
            self.__init()
        return self._hasZ
    #----------------------------------------------------------------------
    @property
    def hasM(self):
        """ returns if it has a m value or not """
        if self._hasM is None:
            self.__init()
        return self._hasM
    #----------------------------------------------------------------------
    @property
    def copyrightText(self):
        """ returns the copyright text """
        if self._copyrightText is None:
            self.__init()
        return self._copyrightText
    #----------------------------------------------------------------------
    @property
    def parentLayer(self):
        """ returns information about the parent """
        if self._parentLayer is None:
            self.__init()
        return self._parentLayer
    #----------------------------------------------------------------------
    @property
    def subLayers(self):
        """ returns sublayers for layer """
        if self._subLayers is None:
            self.__init()
        return self._subLayers
    #----------------------------------------------------------------------
    @property
    def minScale(self):
        """ minimum scale layer will show """
        if self._minScale is None:
            self.__init()
        return self._minScale
    @property
    def maxScale(self):
        """ returns the max scale """
        if self._maxScale is None:
            self.__init()
        return self._maxScale
    @property
    def effectiveMinScale(self):
        """ returns the effective minimum scale value """
        if self._effectiveMinScale is None:
            self.__init()
        return self._effectiveMinScale
    @property
    def effectiveMaxScale(self):
        """ returns the effective maximum scale value """
        if self._effectiveMaxScale is None:
            self.__init()
        return self._effectiveMaxScale
    @property
    def defaultVisibility(self):
        """ returns the default visibility of the layer """
        if self._defaultVisibility is None:
            self.__init()
        return self._defaultVisibility
    @property
    def extent(self):
        """ returns the extent """
        if self._extent is None:
            self.__init()
        return self._extent
    @property
    def timeInfo(self):
        """ returns the time information about the layer """
        if self._timeInfo is None:
            self.__init()
        return self._timeInfo
    @property
    def drawingInfo(self):
        """ returns the symbol information about the layer """
        if self._drawingInfo is None:
            self.__init()
        return self._drawingInfo
    @property
    def hasAttachments(self):
        """ boolean that tells if attachments are associated with layer """
        if self._hasAttachments is None:
            self.__init()
        return self._hasAttachments
    @property
    def htmlPopupType(self):
        """ returns the popup type """
        if self._htmlPopupType is None:
            self.__init()
        return self._htmlPopupType
    @property
    def displayField(self):
        """ returns the primary display field """
        if self._displayField is None:
            self.__init()
        return self._displayField
    @property
    def typeIdField(self):
        """ returns the type Id field """
        if self._typeIdField is None:
            self.__init()
        return self._typeIdField
    @property
    def fields(self):
        """ returns the layer's fields """
        if self._fields is None:
            self.__init()
        return self._fields
    @property
    def types(self):
        """ returns the types """
        if self._types is None:
            self.__init()
        return self._types
    @property
    def relationships(self):
        """ returns the relationships for the layer """
        if self._relationships is None:
            self.__init()
        return self._relationships
    @property
    def maxRecordCount(self):
        """ returns the maximum returned records """
        if self._maxRecordCount is None:
            self.__init()
        # fall back to 1000 when the service definition omits the value
        if self._maxRecordCount is None:
            self._maxRecordCount = 1000
        return self._maxRecordCount
    @property
    def canModifyLayer(self):
        """ returns boolean to say if layer can be modified """
        if self._canModifyLayer is None:
            self.__init()
        return self._canModifyLayer
    @property
    def supportsStatistics(self):
        """ boolean to if supports statistics """
        if self._supportsStatistics is None:
            self.__init()
        return self._supportsStatistics
    @property
    def supportsAdvancedQueries(self):
        """ boolean value if advanced queries is supported """
        if self._supportsAdvancedQueries is None:
            self.__init()
        return self._supportsAdvancedQueries
    @property
    def hasLabels(self):
        """ returns if layer has labels on or not """
        if self._hasLabels is None:
            self.__init()
        return self._hasLabels
    @property
    def canScaleSymbols(self):
        """ states if symbols can scale """
        if self._canScaleSymbols is None:
            self.__init()
        return self._canScaleSymbols
    @property
    def capabilities(self):
        """ operations that can be performed on layer """
        if self._capabilities is None:
            self.__init()
        return self._capabilities
    @property
    def supportedQueryFormats(self):
        """ returns supported query formats """
        if self._supportedQueryFormats is None:
            self.__init()
        return self._supportedQueryFormats
    @property
    def isDataVersioned(self):
        """ returns boolean if data is in version control """
        if self._isDataVersioned is None:
            self.__init()
        return self._isDataVersioned
    @property
    def ownershipBasedAccessControlForFeatures(self):
        """ returns value for ownership based access control """
        if self._ownershipBasedAccessControlForFeatures is None:
            self.__init()
        return self._ownershipBasedAccessControlForFeatures
    @property
    def useStandardizedQueries(self):
        """ returns value if standardized queries can be used """
        if self._useStandardizedQueries is None:
            self.__init()
        return self._useStandardizedQueries
#----------------------------------------------------------------------
def addAttachment(self, oid, file_path):
""" Adds an attachment to a feature service
Input:
oid - string - OBJECTID value to add attachment to
file_path - string - path to file
Output:
JSON Repsonse
"""
if self.hasAttachments == True:
attachURL = self._url + "/%s/addAttachment" % oid
params = {'f':'json'}
if not self._token is None:
params['token'] = self._token
parsed = urlparse.urlparse(attachURL)
files = []
files.append(('attachment', file_path, os.path.basename(file_path)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files=files,
fields=params,
port=parsed.port,
ssl=parsed.scheme.lower() == 'https',
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return self._unicode_convert(json.loads(res))
else:
return "Attachments are not supported for this feature service."
#----------------------------------------------------------------------
def deleteAttachment(self, oid, attachment_id):
""" removes an attachment from a feature service feature
Input:
oid - integer or string - id of feature
attachment_id - integer - id of attachment to erase
Output:
JSON response
"""
url = self._url + "/%s/deleteAttachments" % oid
params = {
"f":"json",
"attachmentIds" : "%s" % attachment_id
}
if not self._token is None:
params['token'] = self._token
return self._do_post(url, params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateAttachment(self, oid, attachment_id, file_path):
""" updates an existing attachment with a new file
Inputs:
oid - string/integer - Unique record ID
attachment_id - integer - Unique attachment identifier
file_path - string - path to new attachment
Output:
JSON response
"""
url = self._url + "/%s/updateAttachment" % oid
params = {
"f":"json",
"attachmentId" : "%s" % attachment_id
}
if not self._token is None:
params['token'] = self._token
parsed = urlparse.urlparse(url)
port = parsed.port
files = []
files.append(('attachment', file_path, os.path.basename(file_path)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files=files,
port=port,
fields=params,
ssl=parsed.scheme.lower() == 'https',
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return self._unicode_convert(json.loads(res))
#----------------------------------------------------------------------
def listAttachments(self, oid):
""" list attachements for a given OBJECT ID """
url = self._url + "/%s/attachments" % oid
params = {
"f":"json"
}
if not self._token is None:
params['token'] = self._token
return self._do_get(url, params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def create_fc_template(self, out_path, out_name):
"""creates a featureclass template on local disk"""
fields = self.fields
objectIdField = self.objectIdField
geomType = self.geometryType
wkid = self.parentLayer.spatialReference['wkid']
return common.create_feature_class(out_path,
out_name,
geomType,
wkid,
fields,
objectIdField)
def create_feature_template(self):
"""creates a feature template"""
fields = self.fields
feat_schema = {}
att = {}
for fld in fields:
self._globalIdField
if not fld['name'] == self._objectIdField and not fld['name'] == self._globalIdField:
att[fld['name']] = ''
feat_schema['attributes'] = att
feat_schema['geometry'] = ''
return common.Feature(feat_schema)
#----------------------------------------------------------------------
def query(self,
where="1=1",
out_fields="*",
timeFilter=None,
geometryFilter=None,
returnGeometry=True,
returnIDsOnly=False,
returnCountOnly=False,
returnFeatureClass=False,
out_fc=None):
""" queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be as UTC timestampes in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
Output:
A list of Feature Objects (default) or a path to the output featureclass if
returnFeatureClass is set to True.
"""
params = {"f": "json",
"where": where,
"outFields": out_fields,
"returnGeometry" : returnGeometry,
"returnIdsOnly" : returnIDsOnly,
"returnCountOnly" : returnCountOnly,
}
if not self._token is None:
params["token"] = self._token
if not timeFilter is None and \
isinstance(timeFilter, filters.TimeFilter):
params['time'] = timeFilter.filter
if not geometryFilter is None and \
isinstance(geometryFilter, filters.GeometryFilter):
gf = geometryFilter.filter
params['geometry'] = gf['geometry']
params['geometryType'] = gf['geometryType']
params['spatialRelationship'] = gf['spatialRel']
params['inSR'] = gf['inSR']
fURL = self._url + "/query"
results = self._do_get(fURL, params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'error' in results:
raise ValueError (results)
if not returnCountOnly and not returnIDsOnly:
if returnFeatureClass:
json_text = json.dumps(results)
temp = common.scratchFolder() + os.sep + uuid.uuid4().get_hex() + ".json"
with open(temp, 'wb') as writer:
writer.write(json_text)
writer.flush()
del writer
fc = common.json_to_featureclass(json_file=temp,
out_fc=out_fc)
os.remove(temp)
return fc
else:
feats = []
for res in results['features']:
feats.append(common.Feature(res))
return feats
else:
return results
return
#----------------------------------------------------------------------
def query_related_records(self,
objectIds,
relationshipId,
outFields="*",
definitionExpression=None,
returnGeometry=True,
maxAllowableOffset=None,
geometryPrecision=None,
outWKID=None,
gdbVersion=None,
returnZ=False,
returnM=False):
"""
The Query operation is performed on a feature service layer
resource. The result of this operation are feature sets grouped
by source layer/table object IDs. Each feature set contains
Feature objects including the values for the fields requested by
the user. For related layers, if you request geometry
information, the geometry of each feature is also returned in
the feature set. For related tables, the feature set does not
include geometries.
Inputs:
objectIds - the object IDs of the table/layer to be queried
relationshipId - The ID of the relationship to be queried.
outFields - the list of fields from the related table/layer
to be included in the returned feature set. This
list is a comma delimited list of field names. If
you specify the shape field in the list of return
fields, it is ignored. To request geometry, set
returnGeometry to true.
You can also specify the wildcard "*" as the
value of this parameter. In this case, the result
s will include all the field values.
definitionExpression - The definition expression to be
applied to the related table/layer.
From the list of objectIds, only those
records that conform to this
expression are queried for related
records.
returnGeometry - If true, the feature set includes the
geometry associated with each feature. The
default is true.
maxAllowableOffset - This option can be used to specify the
maxAllowableOffset to be used for
generalizing geometries returned by the
query operation. The maxAllowableOffset
is in the units of the outSR. If outSR
is not specified, then
maxAllowableOffset is assumed to be in
the unit of the spatial reference of the
map.
geometryPrecision - This option can be used to specify the
number of decimal places in the response
geometries.
outWKID - The spatial reference of the returned geometry.
gdbVersion - The geodatabase version to query. This parameter
applies only if the isDataVersioned property of
the layer queried is true.
returnZ - If true, Z values are included in the results if
the features have Z values. Otherwise, Z values are
not returned. The default is false.
returnM - If true, M values are included in the results if
the features have M values. Otherwise, M values are
not returned. The default is false.
"""
params = {
"f" : "json",
"objectIds" : objectIds,
"relationshipId" : relationshipId,
"outFields" : outFields,
"returnGeometry" : returnGeometry,
"returnM" : returnM,
"returnZ" : returnZ
}
if self._token is not None:
params['token'] = self._token
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if definitionExpression is not None:
params['definitionExpression'] = definitionExpression
if outWKID is not None:
params['outSR'] = common.SpatialReference(outWKID).asDictionary
if maxAllowableOffset is not None:
params['maxAllowableOffset'] = maxAllowableOffset
if geometryPrecision is not None:
params['geometryPrecision'] = geometryPrecision
quURL = self._url + "/queryRelatedRecords"
res = self._do_get(url=quURL, param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return res
#----------------------------------------------------------------------
def getHTMLPopup(self, oid):
"""
The htmlPopup resource provides details about the HTML pop-up
authored by the user using ArcGIS for Desktop.
Input:
oid - object id of the feature where the HTML pop-up
Output:
"""
if self.htmlPopupType != "esriServerHTMLPopupTypeNone":
popURL = self._url + "/%s/htmlPopup" % oid
params = {
'f' : "json"
}
if self._token is not None:
params['token'] = self._token
return self._do_get(url=popURL, param_dict=params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return ""
#----------------------------------------------------------------------
def _chunks(self, l, n):
""" Yield n successive chunks from a list l.
"""
l.sort()
newn = int(1.0 * len(l) / n + 0.5)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
#----------------------------------------------------------------------
    def get_local_copy(self, out_path, includeAttachments=False):
        """ exports the whole feature service to a feature class
            Input:
               out_path - path to where the data will be placed
               includeAttachments - default False. If sync is not supported
                                    then the parameter is ignored.
            Output:
               path to exported feature class or fgdb (as list)
        """
        # Fast path: when the parent service supports sync, use a
        # replica export instead of paging through queries.
        if self.hasAttachments and \
           self.parentLayer.syncEnabled:
            return self.parentLayer.createReplica(replicaName="fgdb_dump",
                                                  layers="%s" % self.id,
                                                  returnAsFeatureClass=True,
                                                  returnAttachments=includeAttachments,
                                                  out_path=out_path)[0]
        elif self.hasAttachments == False and \
             self.parentLayer.syncEnabled:
            return self.parentLayer.createReplica(replicaName="fgdb_dump",
                                                  layers="%s" % self.id,
                                                  returnAsFeatureClass=True,
                                                  out_path=out_path)[0]
        else:
            # Fallback: page over all OBJECTIDs in maxRecordCount-sized
            # bins and merge the partial feature classes.
            result_features = []
            res = self.query(returnIDsOnly=True)
            OIDS = res['objectIds']
            OIDS.sort()
            OIDField = res['objectIdFieldName']
            count = len(OIDS)
            if count <= self.maxRecordCount:
                bins = 1
            else:
                # Python 2 integer division: truncates toward zero here.
                bins = count / self.maxRecordCount
                v = count % self.maxRecordCount
                if v > 0:
                    bins += 1
            chunks = self._chunks(OIDS, bins)
            for chunk in chunks:
                chunk.sort()
                # contiguous OID range query for this bin
                sql = "%s >= %s and %s <= %s" % (OIDField, chunk[0],
                                                 OIDField, chunk[len(chunk) -1])
                temp_base = "a" + uuid.uuid4().get_hex()[:6] + "a"
                temp_fc = r"%s\%s" % (common.scratchGDB(), temp_base)
                temp_fc = self.query(where=sql,
                                     returnFeatureClass=True,
                                     out_fc=temp_fc)
                result_features.append(temp_fc)
            return common.merge_feature_class(merges=result_features,
                                              out_fc=out_path)
#----------------------------------------------------------------------
def updateFeature(self,
features,
gdbVersion=None,
rollbackOnFailure=True):
"""
updates an existing feature in a feature service layer
Input:
feature - feature object(s) to get updated. A single feature
or a list of feature objects can be passed
Output:
dictionary of result messages
"""
params = {
"f" : "json",
"rollbackOnFailure" : rollbackOnFailure
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if self._token is not None:
params['token'] = self._token
if isinstance(features, common.Feature):
params['features'] = json.dumps([features.asDictionary])
elif isinstance(features, list):
vals = []
for feature in features:
if isinstance(feature, common.Feature):
vals.append(feature.asDictionary)
params['features'] = json.dumps(vals)
else:
return {'message' : "invalid inputs"}
updateURL = self._url + "/updateFeatures"
res = self._do_post(url=updateURL,
param_dict=params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return res
#----------------------------------------------------------------------
def deleteFeatures(self,
objectIds="",
where="",
geometryFilter=None,
gdbVersion=None,
rollbackOnFailure=True
):
""" removes 1:n features based on a sql statement
Input:
objectIds - The object IDs of this layer/table to be deleted
where - A where clause for the query filter. Any legal SQL
where clause operating on the fields in the layer is
allowed. Features conforming to the specified where
clause will be deleted.
geometryFilter - a filters.GeometryFilter object to limit
deletion by a geometry.
gdbVersion - Geodatabase version to apply the edits. This
parameter applies only if the isDataVersioned
property of the layer is true
rollbackOnFailure - parameter to specify if the edits should
be applied only if all submitted edits
succeed. If false, the server will apply
the edits that succeed even if some of
the submitted edits fail. If true, the
server will apply the edits only if all
edits succeed. The default value is true.
Output:
JSON response as dictionary
"""
dURL = self._url + "/deleteFeatures"
params = {
"f": "json",
}
if geometryFilter is not None and \
isinstance(geometryFilter, filters.GeometryFilter):
gfilter = geometryFilter.filter
params['geometry'] = gfilter['geometry']
params['geometryType'] = gfilter['geometryType']
params['inSR'] = gfilter['inSR']
params['spatialRel'] = gfilter['spatialRel']
if where is not None and \
where != "":
params['where'] = where
if objectIds is not None and \
objectIds != "":
params['objectIds'] = objectIds
if not self._token is None:
params['token'] = self._token
result = self._do_post(url=dURL, param_dict=params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self.__init()
return result
#----------------------------------------------------------------------
def applyEdits(self,
addFeatures=[],
updateFeatures=[],
deleteFeatures=None,
gdbVersion=None,
rollbackOnFailure=True):
"""
This operation adds, updates, and deletes features to the
associated feature layer or table in a single call.
Inputs:
addFeatures - The array of features to be added. These
features should be common.Feature objects
updateFeatures - The array of features to be updateded.
These features should be common.Feature
objects
deleteFeatures - string of OIDs to remove from service
gdbVersion - Geodatabase version to apply the edits.
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
dictionary of messages
"""
editURL = self._url + "/applyEdits"
params = {"f": "json"
}
if self._token is not None:
params['token'] = self._token
if len(addFeatures) > 0 and \
isinstance(addFeatures[0], common.Feature):
params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
default=common._date_handler)
if len(updateFeatures) > 0 and \
isinstance(updateFeatures[0], common.Feature):
params['updates'] = json.dumps([f.asDictionary for f in updateFeatures],
default=common._date_handler)
if deleteFeatures is not None and \
isinstance(deleteFeatures, str):
params['deletes'] = deleteFeatures
return self._do_post(url=editURL, param_dict=params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def addFeature(self, features,
gdbVersion=None,
rollbackOnFailure=True):
""" Adds a single feature to the service
Inputs:
feature - list of common.Feature object or a single
common.Feature Object
gdbVersion - Geodatabase version to apply the edits
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
JSON message as dictionary
"""
url = self._url + "/addFeatures"
params = {
"f" : "json"
}
if self._token is not None:
params['token'] = self._token
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if isinstance(rollbackOnFailure, bool):
params['rollbackOnFailure'] = rollbackOnFailure
if isinstance(features, list):
params['features'] = json.dumps([feature.asDictionary for feature in features],
default=common._date_handler)
elif isinstance(features, common.Feature):
params['features'] = json.dumps([features.asDictionary],
default=common._date_handler)
else:
return None
return self._do_post(url=url,
param_dict=params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
    def addFeatures(self, fc, attachmentTable=None,
                    nameField="ATT_NAME", blobField="DATA",
                    contentTypeField="CONTENT_TYPE",
                    rel_object_field="REL_OBJECTID"):
        """ adds the features of a feature class to the feature service
           Inputs:
              fc - string - path to feature class data to add.
              attachmentTable - string - (optional) path to attachment table
              nameField - string - (optional) name of file field in attachment table
              blobField - string - (optional) name field containing blob data
              contentTypeField - string - (optional) name of field containing content type
              rel_object_field - string - (optional) name of field with OID of feature class
           Output:
              boolean, add results message as list of dictionaries
        """
        messages = []
        if attachmentTable is None:
            # No attachments: serialize the whole feature class and upload
            # it in chunks of at most max_chunk features per POST.
            count = 0  # NOTE(review): never read after initialization
            bins = 1
            uURL = self._url + "/addFeatures"
            max_chunk = 250
            js = self._unicode_convert(
                common.featureclass_to_json(fc))
            js = js['features']
            if len(js) <= max_chunk:
                bins = 1
            else:
                # Number of chunks, rounded up to cover the remainder.
                bins = int(len(js)/max_chunk)
                if len(js) % max_chunk > 0:
                    bins += 1
            # NOTE(review): assumes self._chunks(l, n) splits l into n
            # roughly equal pieces -- confirm against the helper.
            chunks = self._chunks(l=js, n=bins)
            for chunk in chunks:
                params = {
                    "f" : 'json',
                    "features" : json.dumps(chunk,
                                            default=self._date_handler)
                }
                # NOTE(review): prefer "self._token is not None".
                if not self._token is None:
                    params['token'] = self._token
                result = self._do_post(url=uURL, param_dict=params, proxy_port=self._proxy_port,
                                       proxy_url=self._proxy_url)
                messages.append(result)
                del params
                del result
            return True, messages
        else:
            # With an attachment table: bulk-load every feature that has no
            # attachment first, then add attachment-bearing features layer
            # by layer so each uploaded blob can be tied to its new OID.
            oid_field = common.get_OID_field(fc)
            OIDs = common.get_records_with_attachments(attachment_table=attachmentTable)
            fl = common.create_feature_layer(fc, "%s not in ( %s )" % (oid_field, ",".join(OIDs)))
            val, msgs = self.addFeatures(fl)
            messages.append(msgs)
            del fl
            for oid in OIDs:
                # One single-feature layer per OID that carries attachments.
                fl = common.create_feature_layer(fc, "%s = %s" % (oid_field, oid), name="layer%s" % oid)
                val, msgs = self.addFeatures(fl)
                for result in msgs[0]['addResults']:
                    # Map the service-assigned OID back to the source row's
                    # attachments and upload each blob.
                    oid_fs = result['objectId']
                    sends = common.get_attachment_data(attachmentTable, sql="%s = %s" % (rel_object_field, oid))
                    for s in sends:
                        messages.append(self.addAttachment(oid_fs, s['blob']))
                        del s
                    del sends
                    del result
                messages.append(msgs)
                del fl
                del oid
            del OIDs
            return True, messages
########################################################################
class TableLayer(FeatureLayer):
    """Table object is exactly like FeatureLayer object"""
    # Tables expose the same REST operations as feature layers, so this
    # subclass intentionally adds no behavior of its own.
    pass
| |
"""Internationalization and localization support.
This module provides internationalization (I18N) and localization (L10N)
support for your Python programs by providing an interface to the GNU gettext
message catalog library.
I18N refers to the operation by which a program is made aware of multiple
languages. L10N refers to the adaptation of your program, once
internationalized, to the local language and cultural habits.
"""
# This module represents the integration of work, contributions, feedback, and
# suggestions from the following people:
#
# Martin von Loewis, who wrote the initial implementation of the underlying
# C-based libintlmodule (later renamed _gettext), along with a skeletal
# gettext.py implementation.
#
# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
# which also included a pure-Python implementation to read .mo files if
# intlmodule wasn't available.
#
# James Henstridge, who also wrote a gettext.py module, which has some
# interesting, but currently unsupported experimental features: the notion of
# a Catalog class and instances, and the ability to add to a catalog file via
# a Python API.
#
# Barry Warsaw integrated these modules, wrote the .install() API and code,
# and conformed all C and Python code to Python's coding standards.
#
# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
# module.
#
# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
#
# TODO:
# - Lazy loading of .mo files. Currently the entire catalog is loaded into
# memory, but that's probably bad for large translated programs. Instead,
# the lexical sort of original strings in GNU .mo files should be exploited
# to do binary searches and lazy initializations. Or you might want to use
# the undocumented double-hash algorithm for .mo files with hash tables, but
# you'll need to study the GNU gettext code to do this.
#
# - Support Solaris .mo file formats. Unfortunately, we've been unable to
# find this format documented anywhere.
import os
import re
import sys
# Public API of the module.
__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
           'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
           'bind_textdomain_codeset',
           'dgettext', 'dngettext', 'gettext', 'lgettext', 'ldgettext',
           'ldngettext', 'lngettext', 'ngettext',
           'pgettext', 'dpgettext', 'npgettext', 'dnpgettext',
           ]
# Default locale directory, mirroring the GNU layout: <prefix>/share/locale.
_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale')
# Expression parsing for plural form selection.
#
# The gettext library supports a small subset of C syntax. The only
# incompatible difference is that integer literals starting with zero are
# decimal.
#
# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms
# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y
_token_pattern = re.compile(r"""
(?P<WHITESPACES>[ \t]+) | # spaces and horizontal tabs
(?P<NUMBER>[0-9]+\b) | # decimal integer
(?P<NAME>n\b) | # only n is allowed
(?P<PARENTHESIS>[()]) |
(?P<OPERATOR>[-*/%+?:]|[><!]=?|==|&&|\|\|) | # !, *, /, %, +, -, <, >,
# <=, >=, ==, !=, &&, ||,
# ? :
# unary and bitwise ops
# not allowed
(?P<INVALID>\w+|.) # invalid token
""", re.VERBOSE|re.DOTALL)
def _tokenize(plural):
for mo in re.finditer(_token_pattern, plural):
kind = mo.lastgroup
if kind == 'WHITESPACES':
continue
value = mo.group(kind)
if kind == 'INVALID':
raise ValueError('invalid token in plural form: %s' % value)
yield value
yield ''
def _error(value):
if value:
return ValueError('unexpected token in plural form: %s' % value)
else:
return ValueError('unexpected end of plural form')
# Binary operators grouped from lowest to highest precedence; the dict
# comprehension below flattens the table into {operator: precedence},
# with levels numbered from 1.
_binary_ops = (
    ('||',),
    ('&&',),
    ('==', '!='),
    ('<', '>', '<=', '>='),
    ('+', '-'),
    ('*', '/', '%'),
)
_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops}
# C operators whose Python spelling differs; C's '/' is integer division.
_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'}
def _parse(tokens, priority=-1):
    """Recursively translate a C plural expression into an equivalent
    Python expression string (precedence-climbing parser).

    Returns a (result, next_token) pair; *priority* is the minimum
    operator precedence this call is allowed to consume.
    """
    result = ''
    nexttok = next(tokens)
    # Consume unary '!' prefixes.
    while nexttok == '!':
        result += 'not '
        nexttok = next(tokens)
    if nexttok == '(':
        sub, nexttok = _parse(tokens)
        result = '%s(%s)' % (result, sub)
        if nexttok != ')':
            raise ValueError('unbalanced parenthesis in plural form')
    elif nexttok == 'n':
        # 'n' is the only variable the grammar allows.
        result = '%s%s' % (result, nexttok)
    else:
        try:
            value = int(nexttok, 10)
        except ValueError:
            raise _error(nexttok) from None
        result = '%s%d' % (result, value)
    nexttok = next(tokens)
    # j tracks the precedence of the previous operator (100 = none yet).
    j = 100
    while nexttok in _binary_ops:
        i = _binary_ops[nexttok]
        if i < priority:
            break
        # Break chained comparisons: C evaluates a == b == c pairwise,
        # unlike Python's chained comparison semantics.
        if i in (3, 4) and j in (3, 4):  # '==', '!=', '<', '>', '<=', '>='
            result = '(%s)' % result
        # Replace some C operators by their Python equivalents
        op = _c2py_ops.get(nexttok, nexttok)
        right, nexttok = _parse(tokens, i + 1)
        result = '%s %s %s' % (result, op, right)
        j = i
    if j == priority == 4:  # '<', '>', '<=', '>='
        result = '(%s)' % result
    # C's ternary cond ? a : b maps onto Python's conditional expression.
    if nexttok == '?' and priority <= 0:
        if_true, nexttok = _parse(tokens, 0)
        if nexttok != ':':
            raise _error(nexttok)
        if_false, nexttok = _parse(tokens)
        result = '%s if %s else %s' % (if_true, result, if_false)
        if priority == 0:
            result = '(%s)' % result
    return result, nexttok
def _as_int(n):
try:
i = round(n)
except TypeError:
raise TypeError('Plural value must be an integer, got %s' %
(n.__class__.__name__,)) from None
import warnings
warnings.warn('Plural value must be an integer, got %s' %
(n.__class__.__name__,),
DeprecationWarning, 4)
return n
def c2py(plural):
    """Gets a C expression as used in PO files for plural forms and returns a
    Python function that implements an equivalent expression.
    """
    # Reject absurdly long inputs before parsing them.
    if len(plural) > 1000:
        raise ValueError('plural form expression is too long')
    try:
        result, nexttok = _parse(_tokenize(plural))
        if nexttok:
            raise _error(nexttok)
        # Reject deeply nested expressions that could overflow the
        # compiler's nesting limit when exec'd below.
        depth = 0
        for c in result:
            if c == '(':
                depth += 1
                if depth > 20:
                    # Python compiler limit is about 90.
                    # The most complex example has 2.
                    raise ValueError('plural form expression is too complex')
            elif c == ')':
                depth -= 1
        # exec is acceptable here: `result` was produced by our own parser
        # from a strictly limited grammar (only `n`, integers, operators).
        ns = {'_as_int': _as_int}
        exec('''if True:
            def func(n):
                if not isinstance(n, int):
                    n = _as_int(n)
                return int(%s)
            ''' % result, ns)
        return ns['func']
    except RecursionError:
        # Recursion error can be raised in _parse() or exec().
        raise ValueError('plural form expression is too complex')
def _expand_lang(loc):
import locale
loc = locale.normalize(loc)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = loc.find('@')
if pos >= 0:
modifier = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = loc.find('.')
if pos >= 0:
codeset = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = loc.find('_')
if pos >= 0:
territory = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = loc
ret = []
for i in range(mask+1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY: val += territory
if i & COMPONENT_CODESET: val += codeset
if i & COMPONENT_MODIFIER: val += modifier
ret.append(val)
ret.reverse()
return ret
class NullTranslations:
    """Base catalog class: returns messages untranslated and implements
    the fallback chain; subclasses override _parse() to read catalogs."""
    def __init__(self, fp=None):
        self._info = {}
        self._charset = None
        self._output_charset = None
        self._fallback = None
        if fp is not None:
            self._parse(fp)
    def _parse(self, fp):
        # Subclasses override this to actually read a catalog file.
        pass
    def add_fallback(self, fallback):
        # Append to the end of the existing fallback chain.
        if self._fallback:
            self._fallback.add_fallback(fallback)
        else:
            self._fallback = fallback
    def gettext(self, message):
        if self._fallback:
            return self._fallback.gettext(message)
        return message
    def lgettext(self, message):
        # Deprecated bytes-returning variant of gettext().
        import warnings
        warnings.warn('lgettext() is deprecated, use gettext() instead',
                      DeprecationWarning, 2)
        import locale
        if self._fallback:
            with warnings.catch_warnings():
                # Don't re-warn when delegating to the fallback.
                warnings.filterwarnings('ignore', r'.*\blgettext\b.*',
                                        DeprecationWarning)
                return self._fallback.lgettext(message)
        if self._output_charset:
            return message.encode(self._output_charset)
        return message.encode(locale.getpreferredencoding())
    def ngettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.ngettext(msgid1, msgid2, n)
        # Untranslated fallback uses the Germanic singular/plural rule.
        if n == 1:
            return msgid1
        else:
            return msgid2
    def lngettext(self, msgid1, msgid2, n):
        # Deprecated bytes-returning variant of ngettext().
        import warnings
        warnings.warn('lngettext() is deprecated, use ngettext() instead',
                      DeprecationWarning, 2)
        import locale
        if self._fallback:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', r'.*\blngettext\b.*',
                                        DeprecationWarning)
                return self._fallback.lngettext(msgid1, msgid2, n)
        if n == 1:
            tmsg = msgid1
        else:
            tmsg = msgid2
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        return tmsg.encode(locale.getpreferredencoding())
    def pgettext(self, context, message):
        if self._fallback:
            return self._fallback.pgettext(context, message)
        return message
    def npgettext(self, context, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.npgettext(context, msgid1, msgid2, n)
        if n == 1:
            return msgid1
        else:
            return msgid2
    def info(self):
        # Catalog metadata headers (filled in by subclasses).
        return self._info
    def charset(self):
        return self._charset
    def output_charset(self):
        import warnings
        warnings.warn('output_charset() is deprecated',
                      DeprecationWarning, 2)
        return self._output_charset
    def set_output_charset(self, charset):
        import warnings
        warnings.warn('set_output_charset() is deprecated',
                      DeprecationWarning, 2)
        self._output_charset = charset
    def install(self, names=None):
        # Bind gettext (and optionally the methods listed in *names*)
        # into builtins so every module sees _() without importing it.
        import builtins
        builtins.__dict__['_'] = self.gettext
        if names is not None:
            allowed = {'gettext', 'lgettext', 'lngettext',
                       'ngettext', 'npgettext', 'pgettext'}
            for name in allowed & set(names):
                builtins.__dict__[name] = getattr(self, name)
class GNUTranslations(NullTranslations):
    """Catalog class that parses GNU gettext binary .mo files."""
    # Magic number of .mo files
    LE_MAGIC = 0x950412de
    BE_MAGIC = 0xde120495
    # The encoding of a msgctxt and a msgid in a .mo file is
    # msgctxt + "\x04" + msgid (gettext version >= 0.15)
    CONTEXT = "%s\x04%s"
    # Acceptable .mo versions
    VERSIONS = (0, 1)
    def _get_versions(self, version):
        """Returns a tuple of major version, minor version"""
        return (version >> 16, version & 0xffff)
    def _parse(self, fp):
        """Override this method to support alternative .mo formats."""
        # Delay struct import for speeding up gettext import when .mo files
        # are not used.
        from struct import unpack
        filename = getattr(fp, 'name', '')
        # Parse the .mo file header, which consists of 5 little endian 32
        # bit words.
        self._catalog = catalog = {}
        self.plural = lambda n: int(n != 1)  # germanic plural by default
        buf = fp.read()
        buflen = len(buf)
        # Are we big endian or little endian?
        magic = unpack('<I', buf[:4])[0]
        if magic == self.LE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
            ii = '<II'
        elif magic == self.BE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
            ii = '>II'
        else:
            raise OSError(0, 'Bad magic number', filename)
        major_version, minor_version = self._get_versions(version)
        if major_version not in self.VERSIONS:
            raise OSError(0, 'Bad version number ' + str(major_version), filename)
        # Now put all messages from the .mo file buffer into the catalog
        # dictionary.
        for i in range(0, msgcount):
            # Each index-table entry is a (length, offset) pair.
            mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
            mend = moff + mlen
            tlen, toff = unpack(ii, buf[transidx:transidx+8])
            tend = toff + tlen
            if mend < buflen and tend < buflen:
                msg = buf[moff:mend]
                tmsg = buf[toff:tend]
            else:
                raise OSError(0, 'File is corrupt', filename)
            # See if we're looking at GNU .mo conventions for metadata
            if mlen == 0:
                # Catalog description
                lastk = None
                for b_item in tmsg.split(b'\n'):
                    item = b_item.decode().strip()
                    if not item:
                        continue
                    # Skip over comment lines:
                    if item.startswith('#-#-#-#-#') and item.endswith('#-#-#-#-#'):
                        continue
                    k = v = None
                    if ':' in item:
                        k, v = item.split(':', 1)
                        k = k.strip().lower()
                        v = v.strip()
                        self._info[k] = v
                        lastk = k
                    elif lastk:
                        # Continuation line of the previous header value.
                        self._info[lastk] += '\n' + item
                    if k == 'content-type':
                        self._charset = v.split('charset=')[1]
                    elif k == 'plural-forms':
                        # Compile the plural expression into self.plural.
                        v = v.split(';')
                        plural = v[1].split('plural=')[1]
                        self.plural = c2py(plural)
            # Note: we unconditionally convert both msgids and msgstrs to
            # Unicode using the character encoding specified in the charset
            # parameter of the Content-Type header. The gettext documentation
            # strongly encourages msgids to be us-ascii, but some applications
            # require alternative encodings (e.g. Zope's ZCML and ZPT). For
            # traditional gettext applications, the msgid conversion will
            # cause no problems since us-ascii should always be a subset of
            # the charset encoding. We may want to fall back to 8-bit msgids
            # if the Unicode conversion fails.
            charset = self._charset or 'ascii'
            if b'\x00' in msg:
                # Plural forms
                msgid1, msgid2 = msg.split(b'\x00')
                tmsg = tmsg.split(b'\x00')
                msgid1 = str(msgid1, charset)
                # Plural entries are keyed by (msgid, plural-index) tuples.
                # NOTE(review): the loop variable i shadows the outer message
                # index; safe only because the outer i is not used afterwards.
                for i, x in enumerate(tmsg):
                    catalog[(msgid1, i)] = str(x, charset)
            else:
                catalog[str(msg, charset)] = str(tmsg, charset)
            # advance to next entry in the seek tables
            masteridx += 8
            transidx += 8
    def lgettext(self, message):
        # Deprecated bytes-returning variant of gettext().
        import warnings
        warnings.warn('lgettext() is deprecated, use gettext() instead',
                      DeprecationWarning, 2)
        import locale
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.lgettext(message)
            tmsg = message
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        return tmsg.encode(locale.getpreferredencoding())
    def lngettext(self, msgid1, msgid2, n):
        # Deprecated bytes-returning variant of ngettext().
        import warnings
        warnings.warn('lngettext() is deprecated, use ngettext() instead',
                      DeprecationWarning, 2)
        import locale
        try:
            tmsg = self._catalog[(msgid1, self.plural(n))]
        except KeyError:
            if self._fallback:
                return self._fallback.lngettext(msgid1, msgid2, n)
            if n == 1:
                tmsg = msgid1
            else:
                tmsg = msgid2
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        return tmsg.encode(locale.getpreferredencoding())
    def gettext(self, message):
        # A unique sentinel ensures a falsy catalog entry still counts as
        # found.
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.gettext(message)
            return message
        return tmsg
    def ngettext(self, msgid1, msgid2, n):
        try:
            # Plural entries are keyed by (msgid1, plural-index).
            tmsg = self._catalog[(msgid1, self.plural(n))]
        except KeyError:
            if self._fallback:
                return self._fallback.ngettext(msgid1, msgid2, n)
            if n == 1:
                tmsg = msgid1
            else:
                tmsg = msgid2
        return tmsg
    def pgettext(self, context, message):
        # Context-aware lookups use the "context\x04msgid" key convention.
        ctxt_msg_id = self.CONTEXT % (context, message)
        missing = object()
        tmsg = self._catalog.get(ctxt_msg_id, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.pgettext(context, message)
            return message
        return tmsg
    def npgettext(self, context, msgid1, msgid2, n):
        ctxt_msg_id = self.CONTEXT % (context, msgid1)
        try:
            tmsg = self._catalog[ctxt_msg_id, self.plural(n)]
        except KeyError:
            if self._fallback:
                return self._fallback.npgettext(context, msgid1, msgid2, n)
            if n == 1:
                tmsg = msgid1
            else:
                tmsg = msgid2
        return tmsg
# Locate a .mo file using the gettext strategy
def find(domain, localedir=None, languages=None, all=False):
    """Locate .mo file(s) for *domain* using the gettext search strategy.

    Returns the first matching path (or None); with all=True, returns the
    list of every match.
    """
    if localedir is None:
        localedir = _default_localedir
    if languages is None:
        # Build the language list from the standard environment
        # variables; the first one set wins.
        languages = []
        for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
            val = os.environ.get(envar)
            if val:
                languages = val.split(':')
                break
        if 'C' not in languages:
            languages.append('C')
    # Normalize and expand, keeping first-seen order without duplicates.
    nelangs = []
    for lang in languages:
        for nelang in _expand_lang(lang):
            if nelang not in nelangs:
                nelangs.append(nelang)
    # Select a language.
    result = [] if all else None
    for lang in nelangs:
        if lang == 'C':
            # 'C' means "no translation"; stop searching.
            break
        mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
        if os.path.exists(mofile):
            if not all:
                return mofile
            result.append(mofile)
    return result
# a mapping between absolute .mo file path and Translation object
_translations = {}
# Sentinel distinguishing "codeset not passed" from an explicit None.
_unspecified = ['unspecified']
def translation(domain, localedir=None, languages=None,
                class_=None, fallback=False, codeset=_unspecified):
    """Return a *class_* (default GNUTranslations) instance for *domain*,
    chaining every additional matching catalog as a fallback; parsed
    catalogs are cached per (class, absolute path).
    """
    if class_ is None:
        class_ = GNUTranslations
    mofiles = find(domain, localedir, languages, all=True)
    if not mofiles:
        if fallback:
            return NullTranslations()
        from errno import ENOENT
        raise FileNotFoundError(ENOENT,
                                'No translation file found for domain', domain)
    # Avoid opening, reading, and parsing the .mo file after it's been done
    # once.
    result = None
    for mofile in mofiles:
        key = (class_, os.path.abspath(mofile))
        t = _translations.get(key)
        if t is None:
            with open(mofile, 'rb') as fp:
                # setdefault guards against a concurrent insert.
                t = _translations.setdefault(key, class_(fp))
        # Copy the translation object to allow setting fallbacks and
        # output charset. All other instance data is shared with the
        # cached object.
        # Delay copy import for speeding up gettext import when .mo files
        # are not used.
        import copy
        t = copy.copy(t)
        if codeset is not _unspecified:
            import warnings
            warnings.warn('parameter codeset is deprecated',
                          DeprecationWarning, 2)
            if codeset:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', r'.*\bset_output_charset\b.*',
                                            DeprecationWarning)
                    t.set_output_charset(codeset)
        # First catalog becomes the primary translation; later ones are
        # chained as fallbacks.
        if result is None:
            result = t
        else:
            result.add_fallback(t)
    return result
def install(domain, localedir=None, codeset=_unspecified, names=None):
    """Install _() for *domain* into builtins, falling back to a
    NullTranslations instance when no catalog is found."""
    translation(domain, localedir, fallback=True, codeset=codeset).install(names)
# a mapping b/w domains and locale directories
_localedirs = {}
# a mapping b/w domains and codesets
_localecodesets = {}
# current global domain, `messages' used for compatibility w/ GNU gettext
_current_domain = 'messages'


def textdomain(domain=None):
    """Set the global domain (when *domain* is not None) and return it."""
    global _current_domain
    if domain is not None:
        _current_domain = domain
    return _current_domain
def bindtextdomain(domain, localedir=None):
    """Bind *domain* to *localedir* (when given) and return the current
    binding, defaulting to the system locale directory."""
    global _localedirs
    if localedir is None:
        return _localedirs.get(domain, _default_localedir)
    _localedirs[domain] = localedir
    return localedir
def bind_textdomain_codeset(domain, codeset=None):
    """Deprecated: bind *domain* to *codeset* (when given) and return the
    current binding."""
    import warnings
    warnings.warn('bind_textdomain_codeset() is deprecated',
                  DeprecationWarning, 2)
    global _localecodesets
    if codeset is None:
        return _localecodesets.get(domain)
    _localecodesets[domain] = codeset
    return codeset
def dgettext(domain, message):
    """Like gettext(), but look the message up in the given *domain*."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None))
    except OSError:
        # No catalog for this domain: return the message untranslated.
        return message
    return catalog.gettext(message)
def ldgettext(domain, message):
    # Deprecated bytes-returning variant of dgettext().
    import warnings
    warnings.warn('ldgettext() is deprecated, use dgettext() instead',
                  DeprecationWarning, 2)
    import locale
    codeset = _localecodesets.get(domain)
    try:
        with warnings.catch_warnings():
            # Suppress the nested deprecation warning from translation().
            warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*',
                                    DeprecationWarning)
            t = translation(domain, _localedirs.get(domain, None), codeset=codeset)
    except OSError:
        # No catalog: encode the untranslated message.
        return message.encode(codeset or locale.getpreferredencoding())
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', r'.*\blgettext\b.*',
                                DeprecationWarning)
        return t.lgettext(message)
def dngettext(domain, msgid1, msgid2, n):
    """Like ngettext(), but look the message up in the given *domain*."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None))
    except OSError:
        # No catalog: fall back to the Germanic singular/plural rule.
        return msgid1 if n == 1 else msgid2
    return catalog.ngettext(msgid1, msgid2, n)
def ldngettext(domain, msgid1, msgid2, n):
    # Deprecated bytes-returning variant of dngettext().
    import warnings
    warnings.warn('ldngettext() is deprecated, use dngettext() instead',
                  DeprecationWarning, 2)
    import locale
    codeset = _localecodesets.get(domain)
    try:
        with warnings.catch_warnings():
            # Suppress the nested deprecation warning from translation().
            warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*',
                                    DeprecationWarning)
            t = translation(domain, _localedirs.get(domain, None), codeset=codeset)
    except OSError:
        # No catalog: pick singular/plural ourselves and encode it.
        if n == 1:
            tmsg = msgid1
        else:
            tmsg = msgid2
        return tmsg.encode(codeset or locale.getpreferredencoding())
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', r'.*\blngettext\b.*',
                                DeprecationWarning)
        return t.lngettext(msgid1, msgid2, n)
def dpgettext(domain, context, message):
    """Like pgettext(), but look the message up in the given *domain*."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None))
    except OSError:
        return message
    return catalog.pgettext(context, message)
def dnpgettext(domain, context, msgid1, msgid2, n):
    """Like npgettext(), but look the message up in the given *domain*."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None))
    except OSError:
        return msgid1 if n == 1 else msgid2
    return catalog.npgettext(context, msgid1, msgid2, n)
def gettext(message):
    """Translate *message* using the current global domain."""
    return dgettext(_current_domain, message)
def lgettext(message):
    # Deprecated bytes-returning variant of gettext().
    import warnings
    warnings.warn('lgettext() is deprecated, use gettext() instead',
                  DeprecationWarning, 2)
    with warnings.catch_warnings():
        # Don't re-warn when delegating to the deprecated helper.
        warnings.filterwarnings('ignore', r'.*\bldgettext\b.*',
                                DeprecationWarning)
        return ldgettext(_current_domain, message)
def ngettext(msgid1, msgid2, n):
    """Plural-aware gettext() using the current global domain."""
    return dngettext(_current_domain, msgid1, msgid2, n)
def lngettext(msgid1, msgid2, n):
    # Deprecated bytes-returning variant of ngettext().
    import warnings
    warnings.warn('lngettext() is deprecated, use ngettext() instead',
                  DeprecationWarning, 2)
    with warnings.catch_warnings():
        # Don't re-warn when delegating to the deprecated helper.
        warnings.filterwarnings('ignore', r'.*\bldngettext\b.*',
                                DeprecationWarning)
        return ldngettext(_current_domain, msgid1, msgid2, n)
def pgettext(context, message):
    """Translate *message* within *context* using the global domain."""
    return dpgettext(_current_domain, context, message)


def npgettext(context, msgid1, msgid2, n):
    """Plural-aware pgettext() using the global domain."""
    return dnpgettext(_current_domain, context, msgid1, msgid2, n)
# dcgettext() has been deemed unnecessary and is not implemented.
# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
# was:
#
# import gettext
# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
# _ = cat.gettext
# print _('Hello World')
# The resulting catalog object currently don't support access through a
# dictionary API, which was supported (but apparently unused) in GNOME
# gettext.
Catalog = translation  # backwards-compatible alias (see note above)
| |
# Copyright (c) - 2015, Alex Meade
# Copyright (c) - 2015, Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp.eseries import utils
import cinder.volume.drivers.netapp.options as na_opts
# Host group fixture used for multi-attach volume mappings.
MULTIATTACH_HOST_GROUP = {
    'clusterRef': '8500000060080E500023C7340036035F515B78FC',
    'label': utils.MULTI_ATTACH_HOST_GROUP_NAME,
}
# A host group not managed by this driver.
FOREIGN_HOST_GROUP = {
    'clusterRef': '8500000060080E500023C7340036035F515B78FD',
    'label': 'FOREIGN HOST GROUP',
}
# A dynamic disk pool (DDP) storage pool fixture.
STORAGE_POOL = {
    'label': 'DDP',
    'volumeGroupRef': 'fakevolgroupref',
    'raidLevel': 'raidDiskPool',
}
# A representative E-Series volume object as returned by the REST proxy.
# NOTE(review): totalSizeInBytes and capacity intentionally differ here --
# confirm which field the code under test reads before relying on either.
VOLUME = {
    'extremeProtection': False,
    'pitBaseVolume': True,
    'dssMaxSegmentSize': 131072,
    'totalSizeInBytes': '1073741824',
    'raidLevel': 'raid6',
    'volumeRef': '0200000060080E500023BB34000003FB515C2293',
    'listOfMappings': [],
    'sectorOffset': '15',
    'id': '0200000060080E500023BB34000003FB515C2293',
    'wwn': '60080E500023BB3400001FC352D14CB2',
    'capacity': '2147483648',
    'mgmtClientAttribute': 0,
    'label': 'CFDXJ67BLJH25DXCZFZD4NSF54',
    'volumeFull': False,
    'blkSize': 512,
    'volumeCopyTarget': False,
    'volumeGroupRef': '0400000060080E500023BB3400001F9F52CECC3F',
    'preferredControllerId': '070000000000000000000001',
    'currentManager': '070000000000000000000001',
    'applicationTagOwned': False,
    'status': 'optimal',
    'segmentSize': 131072,
    'volumeUse': 'standardVolume',
    'action': 'none',
    'preferredManager': '070000000000000000000001',
    'volumeHandle': 15,
    'offline': False,
    'preReadRedundancyCheckEnabled': False,
    'dssPreallocEnabled': False,
    'name': 'bdm-vc-test-1',
    'worldWideName': '60080E500023BB3400001FC352D14CB2',
    'currentControllerId': '070000000000000000000001',
    'protectionInformationCapable': False,
    'mapped': False,
    'reconPriority': 1,
    'protectionType': 'type1Protection'
}
# iSCSI initiator IQNs used by the host fixtures below.
INITIATOR_NAME = 'iqn.1998-01.com.vmware:localhost-28a58148'
INITIATOR_NAME_2 = 'iqn.1998-01.com.vmware:localhost-28a58149'
INITIATOR_NAME_3 = 'iqn.1998-01.com.vmware:localhost-28a58150'
# Fibre Channel initiator port WWPNs.
WWPN = '20130080E5322230'
WWPN_2 = '20230080E5322230'
# Target-side FC port WWPNs.
FC_TARGET_WWPNS = [
    '500a098280feeba5',
    '500a098290feeba5',
    '500a098190feeba5',
    '500a098180feeba5'
]
# Expected initiator-to-target map derived from FC_FABRIC_MAP below.
FC_I_T_MAP = {
    '20230080E5322230': [
        '500a098280feeba5',
        '500a098290feeba5'
    ],
    '20130080E5322230': [
        '500a098190feeba5',
        '500a098180feeba5'
    ]
}
# Fabric zoning view: per-fabric target and initiator port lists.
FC_FABRIC_MAP = {
    'fabricB': {
        'target_port_wwn_list': [
            '500a098190feeba5',
            '500a098180feeba5'
        ],
        'initiator_port_wwn_list': [
            '20130080E5322230'
        ]
    },
    'fabricA': {
        'target_port_wwn_list': [
            '500a098290feeba5',
            '500a098280feeba5'
        ],
        'initiator_port_wwn_list': [
            '20230080E5322230'
        ]
    }
}
# Host fixture attached to the multiattach cluster, using INITIATOR_NAME.
HOST = {
    'isSAControlled': False,
    'confirmLUNMappingCreation': False,
    'label': 'stlrx300s7-55',
    'isLargeBlockFormatHost': False,
    'clusterRef': '8500000060080E500023C7340036035F515B78FC',
    'protectionInformationCapableAccessMethod': False,
    'ports': [],
    'hostRef': '8400000060080E500023C73400300381515BFBA3',
    'hostTypeIndex': 6,
    'hostSidePorts': [{
        'label': 'NewStore',
        'type': 'iscsi',
        'address': INITIATOR_NAME}]
}
# Host fixture not in any cluster (NULL_REF), using INITIATOR_NAME_2.
HOST_2 = {
    'isSAControlled': False,
    'confirmLUNMappingCreation': False,
    'label': 'stlrx300s7-55',
    'isLargeBlockFormatHost': False,
    'clusterRef': utils.NULL_REF,
    'protectionInformationCapableAccessMethod': False,
    'ports': [],
    'hostRef': '8400000060080E500023C73400300381515BFBA5',
    'hostTypeIndex': 6,
    'hostSidePorts': [{
        'label': 'NewStore', 'type': 'iscsi',
        'address': INITIATOR_NAME_2}]
}
# HOST_3 has all lun_ids in use.
HOST_3 = {
    'isSAControlled': False,
    'confirmLUNMappingCreation': False,
    'label': 'stlrx300s7-55',
    'isLargeBlockFormatHost': False,
    'clusterRef': '8500000060080E500023C73400360351515B78FC',
    'protectionInformationCapableAccessMethod': False,
    'ports': [],
    'hostRef': '8400000060080E501023C73400800381515BFBA5',
    'hostTypeIndex': 6,
    'hostSidePorts': [{
        'label': 'NewStore', 'type': 'iscsi',
        'address': INITIATOR_NAME_3}],
}
# LUN mapping of VOLUME to HOST.
VOLUME_MAPPING = {
    'lunMappingRef': '8800000000000000000000000000000000000000',
    'lun': 0,
    'ssid': 16384,
    'perms': 15,
    'volumeRef': VOLUME['volumeRef'],
    'type': 'all',
    'mapRef': HOST['hostRef']
}
# VOLUME_MAPPING_3 corresponding to HOST_3 has all lun_ids in use.
VOLUME_MAPPING_3 = {
    'lunMappingRef': '8800000000000000000000000000000000000000',
    # NOTE(review): a range object rather than an int -- presumably marks
    # every LUN id 0..254 as occupied; confirm against the consuming test.
    'lun': range(255),
    'ssid': 16384,
    'perms': 15,
    'volumeRef': VOLUME['volumeRef'],
    'type': 'all',
    'mapRef': HOST_3['hostRef'],
}
# Same mapping as VOLUME_MAPPING, but targeting the multiattach host group.
VOLUME_MAPPING_TO_MULTIATTACH_GROUP = copy.deepcopy(VOLUME_MAPPING)
VOLUME_MAPPING_TO_MULTIATTACH_GROUP.update(
    {'mapRef': MULTIATTACH_HOST_GROUP['clusterRef']}
)
# Array-level fixture describing a whole E-Series storage system.
STORAGE_SYSTEM = {
    'freePoolSpace': 11142431623168,
    'driveCount': 24,
    'hostSparesUsed': 0, 'id':
    '1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b',
    'hotSpareSizeAsString': '0', 'wwn':
    '60080E500023C73400000000515AF323',
    'parameters': {
        'minVolSize': 1048576, 'maxSnapshotsPerBase': 16,
        'maxDrives': 192,
        'maxVolumes': 512,
        'maxVolumesPerGroup': 256,
        'maxMirrors': 0,
        'maxMappingsPerVolume': 1,
        'maxMappableLuns': 256,
        'maxVolCopys': 511,
        'maxSnapshots': 256
    }, 'hotSpareCount': 0,
    'hostSpareCountInStandby': 0,
    'status': 'needsattn',
    'trayCount': 1,
    'usedPoolSpaceAsString': '5313000380416',
    'ip2': '10.63.165.216',
    'ip1': '10.63.165.215',
    'freePoolSpaceAsString': '11142431623168',
    'types': 'SAS',
    'name': 'stle2600-7_8',
    'hotSpareSize': 0,
    'usedPoolSpace': 5313000380416,
    'driveTypes': ['sas'],
    'unconfiguredSpaceByDriveType': {},
    'unconfiguredSpaceAsStrings': '0',
    'model': '2650',
    'unconfiguredSpace': 0
}
# Snapshot (point-in-time) group fixture; SNAPSHOT_IMAGE below belongs to
# this group (same pitGroupRef).
SNAPSHOT_GROUP = {
    'status': 'optimal',
    'autoDeleteLimit': 0,
    'maxRepositoryCapacity': '-65536',
    'rollbackStatus': 'none',
    'unusableRepositoryCapacity': '0',
    'pitGroupRef':
    '3300000060080E500023C7340000098D5294AC9A',
    'clusterSize': 65536,
    'label': 'C6JICISVHNG2TFZX4XB5ZWL7O',
    'maxBaseCapacity': '476187142128128',
    'repositoryVolume': '3600000060080E500023BB3400001FA952CEF12C',
    'fullWarnThreshold': 99,
    'repFullPolicy': 'purgepit',
    'action': 'none',
    'rollbackPriority': 'medium',
    'creationPendingStatus': 'none',
    'consistencyGroupRef': '0000000000000000000000000000000000000000',
    'volumeHandle': 49153,
    'consistencyGroup': False,
    'baseVolume': '0200000060080E500023C734000009825294A534'
}
# A single point-in-time snapshot image inside SNAPSHOT_GROUP.
SNAPSHOT_IMAGE = {
    'status': 'optimal',
    'pitCapacity': '2147483648',
    'pitTimestamp': '1389315375',
    'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A',
    'creationMethod': 'user',
    'repositoryCapacityUtilization': '2818048',
    'activeCOW': True,
    'isRollbackSource': False,
    'pitRef': '3400000060080E500023BB3400631F335294A5A8',
    'pitSequenceNumber': '19'
}
HARDWARE_INVENTORY = {
'iscsiPorts': [
{
'controllerId':
'070000000000000000000002',
'ipv4Enabled': True,
'ipv4Data': {
'ipv4Address': '0.0.0.0',
'ipv4AddressConfigMethod':
'configStatic',
'ipv4VlanId': {
'isEnabled': False,
'value': 0
},
'ipv4AddressData': {
'ipv4Address': '172.20.123.66',
'ipv4SubnetMask': '255.255.255.0',
'configState': 'configured',
'ipv4GatewayAddress': '0.0.0.0'
}
},
'tcpListenPort': 3260,
'interfaceRef': '2202040000000000000000000000000000000000',
'iqn': 'iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323'
}
],
'fibrePorts': [
{
"channel": 1,
"loopID": 126,
"speed": 800,
"hardAddress": 6,
"nodeName": "20020080E5322230",
"portName": "20130080E5322230",
"portId": "011700",
"topology": "fabric",
"part": "PM8032 ",
"revision": 8,
"chanMiswire": False,
"esmMiswire": False,
"linkStatus": "up",
"isDegraded": False,
"speedControl": "auto",
"maxSpeed": 800,
"speedNegError": False,
"reserved1": "000000000000000000000000",
"reserved2": "",
"ddsChannelState": 0,
"ddsStateReason": 0,
"ddsStateWho": 0,
"isLocal": True,
"channelPorts": [],
"currentInterfaceSpeed": "speed8gig",
"maximumInterfaceSpeed": "speed8gig",
"interfaceRef": "2202020000000000000000000000000000000000",
"physicalLocation": {
"trayRef": "0000000000000000000000000000000000000000",
"slot": 0,
"locationParent": {
"refType": "generic",
"controllerRef": None,
"symbolRef": "0000000000000000000000000000000000000000",
"typedReference": None
},
"locationPosition": 0
},
"isTrunkCapable": False,
"trunkMiswire": False,
"protectionInformationCapable": True,
"controllerId": "070000000000000000000002",
"interfaceId": "2202020000000000000000000000000000000000",
"addressId": "20130080E5322230",
"niceAddressId": "20:13:00:80:E5:32:22:30"
},
{
"channel": 2,
"loopID": 126,
"speed": 800,
"hardAddress": 7,
"nodeName": "20020080E5322230",
"portName": "20230080E5322230",
"portId": "011700",
"topology": "fabric",
"part": "PM8032 ",
"revision": 8,
"chanMiswire": False,
"esmMiswire": False,
"linkStatus": "up",
"isDegraded": False,
"speedControl": "auto",
"maxSpeed": 800,
"speedNegError": False,
"reserved1": "000000000000000000000000",
"reserved2": "",
"ddsChannelState": 0,
"ddsStateReason": 0,
"ddsStateWho": 0,
"isLocal": True,
"channelPorts": [],
"currentInterfaceSpeed": "speed8gig",
"maximumInterfaceSpeed": "speed8gig",
"interfaceRef": "2202030000000000000000000000000000000000",
"physicalLocation": {
"trayRef": "0000000000000000000000000000000000000000",
"slot": 0,
"locationParent": {
"refType": "generic",
"controllerRef": None,
"symbolRef": "0000000000000000000000000000000000000000",
"typedReference": None
},
"locationPosition": 0
},
"isTrunkCapable": False,
"trunkMiswire": False,
"protectionInformationCapable": True,
"controllerId": "070000000000000000000002",
"interfaceId": "2202030000000000000000000000000000000000",
"addressId": "20230080E5322230",
"niceAddressId": "20:23:00:80:E5:32:22:30"
},
]
}
# A completed volume-copy job fixture (source -> target clone copy).
VOLUME_COPY_JOB = {
    "status": "complete",
    "cloneCopy": True,
    "pgRef": "3300000060080E500023C73400000ACA52D29454",
    "volcopyHandle": 49160,
    "idleTargetWriteProt": True,
    "copyPriority": "priority2",
    "volcopyRef": "1800000060080E500023C73400000ACF52D29466",
    "worldWideName": "60080E500023C73400000ACF52D29466",
    "copyCompleteTime": "0",
    "sourceVolume": "3500000060080E500023C73400000ACE52D29462",
    "currentManager": "070000000000000000000002",
    "copyStartTime": "1389551671",
    "reserved1": "00000000",
    "targetVolume": "0200000060080E500023C73400000A8C52D10675",
}
def create_configuration_eseries():
    """Build a cinder Configuration object pre-loaded with the NetApp
    E-Series option groups and the canned connection values these tests
    expect (iSCSI protocol, DDP pool, two controller IPs, etc.).
    """
    config = conf.Configuration(None)
    # Register every NetApp option group the driver consults.
    option_groups = (
        na_opts.netapp_connection_opts,
        na_opts.netapp_transport_opts,
        na_opts.netapp_basicauth_opts,
        na_opts.netapp_provisioning_opts,
        na_opts.netapp_eseries_opts,
    )
    for opts in option_groups:
        config.append_config_values(opts)
    # Canned values used throughout the E-Series unit tests.
    overrides = [
        ('netapp_storage_protocol', 'iscsi'),
        ('netapp_login', 'rw'),
        ('netapp_password', 'rw'),
        ('netapp_server_hostname', '127.0.0.1'),
        ('netapp_transport_type', 'http'),
        ('netapp_server_port', '8080'),
        ('netapp_storage_pools', 'DDP'),
        ('netapp_storage_family', 'eseries'),
        ('netapp_sa_password', 'saPass'),
        ('netapp_controller_ips', '10.11.12.13,10.11.12.14'),
        ('netapp_webservice_path', '/devmgr/v2'),
        ('netapp_enable_multiattach', False),
    ]
    for attr, value in overrides:
        setattr(config, attr, value)
    return config
def deepcopy_return_value_method_decorator(fn):
    '''Returns a deepcopy of the returned value of the wrapped function.

    The deep copy ensures callers can freely mutate results without
    corrupting the shared module-level fixture data.
    '''
    # Local import keeps this self-contained; functools is stdlib.
    import functools

    # functools.wraps preserves fn's __name__/__doc__ on the wrapper,
    # which keeps test failure messages and introspection meaningful.
    @functools.wraps(fn)
    def decorator(*args, **kwargs):
        return copy.deepcopy(fn(*args, **kwargs))
    return decorator
def deepcopy_return_value_class_decorator(cls):
    '''Wraps all 'non-protected' methods of a class with the
    deepcopy_return_value_method_decorator decorator.
    '''
    class NewClass(cls):
        def __getattribute__(self, attr_name):
            attr = super(NewClass, self).__getattribute__(attr_name)
            # Leave private/protected attributes and mock objects alone;
            # everything else that is callable gets the deep-copy wrapper.
            if attr_name.startswith('_'):
                return attr
            if not hasattr(attr, '__call__') or isinstance(attr, mock.Mock):
                return attr
            return deepcopy_return_value_method_decorator(attr)
    return NewClass
@deepcopy_return_value_class_decorator
class FakeEseriesClient(object):
    """In-memory stand-in for the E-Series REST client.

    Each public method returns the module-level fixture data; the class
    decorator deep-copies every return value, so tests may mutate the
    results without corrupting the shared fixtures.
    """
    def __init__(self, *args, **kwargs):
        pass
    def list_storage_pools(self):
        return [STORAGE_POOL]
    def register_storage_system(self, *args, **kwargs):
        # Note: unlike STORAGE_SYSTEM above, the capacity fields here are
        # strings and status is 'optimal'.
        return {
            'freePoolSpace': '17055871480319',
            'driveCount': 24,
            'wwn': '60080E500023C73400000000515AF323',
            'id': '1',
            'hotSpareSizeAsString': '0',
            'hostSparesUsed': 0,
            'types': '',
            'hostSpareCountInStandby': 0,
            'status': 'optimal',
            'trayCount': 1,
            'usedPoolSpaceAsString': '37452115456',
            'ip2': '10.63.165.216',
            'ip1': '10.63.165.215',
            'freePoolSpaceAsString': '17055871480319',
            'hotSpareCount': 0,
            'hotSpareSize': '0',
            'name': 'stle2600-7_8',
            'usedPoolSpace': '37452115456',
            'driveTypes': ['sas'],
            'unconfiguredSpaceByDriveType': {},
            'unconfiguredSpaceAsStrings': '0',
            'model': '2650',
            'unconfiguredSpace': '0'
        }
    def list_volumes(self):
        return [VOLUME]
    def delete_volume(self, vol):
        pass
    def create_host_group(self, name):
        return MULTIATTACH_HOST_GROUP
    def get_host_group(self, ref):
        return MULTIATTACH_HOST_GROUP
    def list_host_groups(self):
        return [MULTIATTACH_HOST_GROUP]
    def get_host_group_by_name(self, name, *args, **kwargs):
        # Raises IndexError if no group matches, mirroring a lookup miss.
        host_groups = self.list_host_groups()
        return [host_group for host_group in host_groups
                if host_group['label'] == name][0]
    def set_host_group_for_host(self, *args, **kwargs):
        pass
    def create_host_with_ports(self, *args, **kwargs):
        return HOST
    def list_hosts(self):
        return [HOST, HOST_2]
    def get_host(self, *args, **kwargs):
        return HOST
    def create_volume_mapping(self, *args, **kwargs):
        return VOLUME_MAPPING
    def get_volume_mappings(self):
        return [VOLUME_MAPPING]
    def get_volume_mappings_for_volume(self, volume):
        return [VOLUME_MAPPING]
    def get_volume_mappings_for_host(self, host_ref):
        return [VOLUME_MAPPING]
    def get_volume_mappings_for_host_group(self, hg_ref):
        return [VOLUME_MAPPING]
    def delete_volume_mapping(self):
        return
    def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id):
        return {'lun': lun_id}
    def list_storage_system(self):
        return STORAGE_SYSTEM
    def list_storage_systems(self):
        return [STORAGE_SYSTEM]
    def list_snapshot_groups(self):
        return [SNAPSHOT_GROUP]
    def list_snapshot_images(self):
        return [SNAPSHOT_IMAGE]
    def list_host_types(self):
        return [
            {
                'id': '4',
                'code': 'AIX',
                'name': 'AIX',
                'index': 4
            },
            {
                'id': '5',
                'code': 'IRX',
                'name': 'IRX',
                'index': 5
            },
            {
                'id': '6',
                'code': 'LnxALUA',
                'name': 'LnxALUA',
                'index': 6
            }
        ]
    def list_hardware_inventory(self):
        return HARDWARE_INVENTORY
    def create_volume_copy_job(self, *args, **kwargs):
        return VOLUME_COPY_JOB
    def list_vol_copy_job(self, *args, **kwargs):
        return VOLUME_COPY_JOB
    def delete_vol_copy_job(self, *args, **kwargs):
        pass
    def delete_snapshot_volume(self, *args, **kwargs):
        pass
    def list_target_wwpns(self, *args, **kwargs):
        return [WWPN_2]
| |
# See http://zulip.readthedocs.io/en/latest/events-system.html for
# high-level documentation on how this system works.
from __future__ import absolute_import
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Text, Union
from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils.timezone import now as timezone_now
from collections import deque
import datetime
import os
import time
import socket
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.autoreload
import tornado.ioloop
import random
import traceback
from zerver.models import UserProfile, Client
from zerver.decorator import RespondAsynchronously
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
from zerver.tornado.exceptions import BadEventQueueIdError
import copy
import six
# Shared HTTP session used by the Django side to talk to Tornado.
requests_client = requests.Session()
# When Tornado runs locally, disable proxy environment variables so
# internal requests never detour through an HTTP(S) proxy.
for host in ['127.0.0.1', 'localhost']:
    if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
        # This seems like the only working solution to ignore proxy in
        # requests library.
        requests_client.trust_env = False
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
# How often the periodic garbage collector for idle queues runs.
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events().  The actual timeout value is randomized for each
# client connection based on the below value.  We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor(object):
    """Server-side record of one client's registered event queue.

    Tracks the EventQueue itself plus delivery state: which long-polling
    handler (if any) is currently attached, which event types the client
    wants, and when it last connected (used for idle-queue GC).
    """

    def __init__(self, user_profile_id, user_profile_email, realm_id, event_queue,
                 event_types, client_type_name, apply_markdown=True,
                 all_public_streams=False, lifespan_secs=0, narrow=None):
        # type: (int, Text, int, EventQueue, Optional[Sequence[str]], Text, bool, bool, int, Optional[Iterable[Sequence[Text]]]) -> None
        # These objects are serialized on shutdown and restored on restart.
        # If fields are added or semantics are changed, temporary code must be
        # added to load_event_queues() to update the restored objects.
        # Additionally, the to_dict and from_dict methods must be updated
        if narrow is None:
            # Use a None sentinel rather than a shared mutable default list.
            narrow = []
        self.user_profile_id = user_profile_id
        self.user_profile_email = user_profile_email
        self.realm_id = realm_id
        self.current_handler_id = None  # type: Optional[int]
        self.current_client_name = None  # type: Optional[Text]
        self.event_queue = event_queue
        self.queue_timeout = lifespan_secs
        self.event_types = event_types
        self.last_connection_time = time.time()
        self.apply_markdown = apply_markdown
        self.all_public_streams = all_public_streams
        self.client_type_name = client_type_name
        self._timeout_handle = None  # type: Any # TODO: should be return type of ioloop.add_timeout
        self.narrow = narrow
        self.narrow_filter = build_narrow_filter(narrow)
        # Clamp queue_timeout to between minimum and maximum timeouts
        self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS, min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))

    def to_dict(self):
        # type: () -> Dict[str, Any]
        """Serialize this descriptor for the on-disk queue dump."""
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(user_profile_id=self.user_profile_id,
                    user_profile_email=self.user_profile_email,
                    realm_id=self.realm_id,
                    event_queue=self.event_queue.to_dict(),
                    queue_timeout=self.queue_timeout,
                    event_types=self.event_types,
                    last_connection_time=self.last_connection_time,
                    apply_markdown=self.apply_markdown,
                    all_public_streams=self.all_public_streams,
                    narrow=self.narrow,
                    client_type_name=self.client_type_name)

    def __repr__(self):
        # type: () -> str
        return "ClientDescriptor<%s>" % (self.event_queue.id,)

    @classmethod
    def from_dict(cls, d):
        # type: (MutableMapping[str, Any]) -> ClientDescriptor
        """Rebuild a descriptor from a dict produced by to_dict().

        Contains migrations for dumps written by older server versions.
        """
        if 'user_profile_email' not in d:
            # Temporary migration for the addition of the new user_profile_email field
            from zerver.models import get_user_profile_by_id
            d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
        if 'client_type' in d:
            # Temporary migration for the rename of client_type to client_type_name
            d['client_type_name'] = d['client_type']
        ret = cls(d['user_profile_id'], d['user_profile_email'], d['realm_id'],
                  EventQueue.from_dict(d['event_queue']), d['event_types'],
                  d['client_type_name'], d['apply_markdown'], d['all_public_streams'],
                  d['queue_timeout'], d.get('narrow', []))
        ret.last_connection_time = d['last_connection_time']
        return ret

    def prepare_for_pickling(self):
        # type: () -> None
        # Handler ids and ioloop timeouts are process-local and cannot
        # survive a restart, so drop them before serialization.
        self.current_handler_id = None
        self._timeout_handle = None

    def add_event(self, event):
        # type: (Dict[str, Any]) -> None
        """Queue an event and, if a handler is connected, deliver now."""
        if self.current_handler_id is not None:
            handler = get_handler_by_id(self.current_handler_id)
            async_request_restart(handler._request)
        self.event_queue.push(event)
        self.finish_current_handler()

    def finish_current_handler(self):
        # type: () -> bool
        """Flush queue contents to the attached handler, if any.

        Returns True if a handler was connected (and has now been
        finished and detached), False otherwise.
        """
        if self.current_handler_id is not None:
            err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
            try:
                finish_handler(self.current_handler_id, self.event_queue.id,
                               self.event_queue.contents(), self.apply_markdown)
            except Exception:
                logging.exception(err_msg)
            finally:
                self.disconnect_handler()
            return True
        return False

    def accepts_event(self, event):
        # type: (Mapping[str, Any]) -> bool
        """True if this client registered interest in this event."""
        if self.event_types is not None and event["type"] not in self.event_types:
            return False
        if event["type"] == "message":
            # Message events additionally pass through the narrow filter.
            return self.narrow_filter(event)
        return True

    # TODO: Refactor so we don't need this function
    def accepts_messages(self):
        # type: () -> bool
        return self.event_types is None or "message" in self.event_types

    def idle(self, now):
        # type: (float) -> bool
        """True if no handler is attached and the queue has timed out."""
        if not hasattr(self, 'queue_timeout'):
            # Descriptor restored from a dump predating queue_timeout.
            self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS
        return (self.current_handler_id is None and
                now - self.last_connection_time >= self.queue_timeout)

    def connect_handler(self, handler_id, client_name):
        # type: (int, Text) -> None
        """Attach a long-polling handler and schedule its heartbeat."""
        self.current_handler_id = handler_id
        self.current_client_name = client_name
        set_descriptor_by_handler_id(handler_id, self)
        self.last_connection_time = time.time()

        def timeout_callback():
            # type: () -> None
            self._timeout_handle = None
            # All clients get heartbeat events
            self.add_event(dict(type='heartbeat'))
        ioloop = tornado.ioloop.IOLoop.instance()
        # Randomize the heartbeat so clients don't all fire at once.
        heartbeat_time = time.time() + HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
        if self.client_type_name != 'API: heartbeat test':
            self._timeout_handle = ioloop.add_timeout(heartbeat_time, timeout_callback)

    def disconnect_handler(self, client_closed=False):
        # type: (bool) -> None
        """Detach the current handler and cancel any pending heartbeat."""
        if self.current_handler_id:
            clear_descriptor_by_handler_id(self.current_handler_id, None)
            clear_handler_by_id(self.current_handler_id)
            if client_closed:
                logging.info("Client disconnected for queue %s (%s via %s)" %
                             (self.event_queue.id, self.user_profile_email,
                              self.current_client_name))
        self.current_handler_id = None
        self.current_client_name = None
        if self._timeout_handle is not None:
            ioloop = tornado.ioloop.IOLoop.instance()
            ioloop.remove_timeout(self._timeout_handle)
            self._timeout_handle = None

    def cleanup(self):
        # type: () -> None
        # Before we can GC the event queue, we need to disconnect the
        # handler and notify the client (or connection server) so that
        # they can cleanup their own state related to the GC'd event
        # queue.  Finishing the handler before we GC ensures the
        # invariant that event queues are idle when passed to
        # `do_gc_event_queues` is preserved.
        self.finish_current_handler()
        do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
                           {self.realm_id})
def compute_full_event_type(event):
    # type: (Mapping[str, Any]) -> str
    """Return the collapsing key for an event.

    Plain events key on their "type"; message-flag updates get a
    compound key so EventQueue can merge repeated updates, with the
    "all messages" variant kept in its own category.
    """
    event_type = event["type"]
    if event_type != "update_message_flags":
        return event_type
    if event["all"]:
        # Put the "all" case in its own category
        return "all_flags/%s/%s" % (event["flag"], event["operation"])
    return "flags/%s/%s" % (event["operation"], event["flag"])
class EventQueue(object):
    """An ordered queue of events for one client connection.

    High-churn event types ("pointer", "restart", and flag updates) are
    collapsed into a single "virtual" event per type so an idle client's
    queue doesn't grow without bound; contents() merges them back into
    their correct position by event id.
    """
    def __init__(self, id):
        # type: (str) -> None
        self.queue = deque() # type: ignore # Should be Deque[Dict[str, Any]], but Deque isn't available in Python 3.4
        self.next_event_id = 0 # type: int
        self.id = id # type: str
        # Collapsed events, keyed by full event type (see push()).
        self.virtual_events = {} # type: Dict[str, Dict[str, Any]]
    def to_dict(self):
        # type: () -> Dict[str, Any]
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(id=self.id,
                    next_event_id=self.next_event_id,
                    queue=list(self.queue),
                    virtual_events=self.virtual_events)
    @classmethod
    def from_dict(cls, d):
        # type: (Dict[str, Any]) -> EventQueue
        ret = cls(d['id'])
        ret.next_event_id = d['next_event_id']
        ret.queue = deque(d['queue'])
        # Older dumps predate virtual_events; default to empty.
        ret.virtual_events = d.get("virtual_events", {})
        return ret
    def push(self, event):
        # type: (Dict[str, Any]) -> None
        """Assign the next event id and append (or collapse) the event."""
        event['id'] = self.next_event_id
        self.next_event_id += 1
        full_event_type = compute_full_event_type(event)
        if (full_event_type in ["pointer", "restart"] or
                full_event_type.startswith("flags/")):
            if full_event_type not in self.virtual_events:
                self.virtual_events[full_event_type] = copy.deepcopy(event)
                return
            # Update the virtual event with the values from the event
            virtual_event = self.virtual_events[full_event_type]
            virtual_event["id"] = event["id"]
            if "timestamp" in event:
                virtual_event["timestamp"] = event["timestamp"]
            if full_event_type == "pointer":
                virtual_event["pointer"] = event["pointer"]
            elif full_event_type == "restart":
                virtual_event["server_generation"] = event["server_generation"]
            elif full_event_type.startswith("flags/"):
                virtual_event["messages"] += event["messages"]
        else:
            self.queue.append(event)
    # Note that pop ignores virtual events.  This is fine in our
    # current usage since virtual events should always be resolved to
    # a real event before being given to users.
    def pop(self):
        # type: () -> Dict[str, Any]
        return self.queue.popleft()
    def empty(self):
        # type: () -> bool
        return len(self.queue) == 0 and len(self.virtual_events) == 0
    # See the comment on pop; that applies here as well
    def prune(self, through_id):
        # type: (int) -> None
        """Discard acknowledged events with id <= through_id."""
        while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
            self.pop()
    def contents(self):
        # type: () -> List[Dict[str, Any]]
        """Return all events in id order, resolving virtual events.

        Side effect: virtual events are materialized into the real queue,
        so subsequent push/prune operations see them as ordinary events.
        """
        contents = [] # type: List[Dict[str, Any]]
        # Keyed by (int) event id, not by event type.
        virtual_id_map = {} # type: Dict[int, Dict[str, Any]]
        for event_type in self.virtual_events:
            virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
        virtual_ids = sorted(list(virtual_id_map.keys()))
        # Merge the virtual events into their final place in the queue
        index = 0
        length = len(virtual_ids)
        for event in self.queue:
            while index < length and virtual_ids[index] < event["id"]:
                contents.append(virtual_id_map[virtual_ids[index]])
                index += 1
            contents.append(event)
        while index < length:
            contents.append(virtual_id_map[virtual_ids[index]])
            index += 1
        self.virtual_events = {}
        self.queue = deque(contents)
        return contents
# maps queue ids to client descriptors
clients = {}  # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {}  # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {}  # type: Dict[int, List[ClientDescriptor]]
# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id
# that is about to be deleted
gc_hooks = []  # type: List[Callable[[int, ClientDescriptor, bool], None]]
# Monotonic counter used to build unique queue ids for this process.
next_queue_id = 0
def add_client_gc_hook(hook):
    # type: (Callable[[int, ClientDescriptor, bool], None]) -> None
    """Register a callback to run for each queue garbage-collected."""
    gc_hooks.append(hook)
def get_client_descriptor(queue_id):
    # type: (str) -> Optional[ClientDescriptor]
    """Look up a queue by id; None if unknown or already GC'd."""
    return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id):
    # type: (int) -> List[ClientDescriptor]
    """All queues registered by the given user (possibly empty)."""
    return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id):
    # type: (int) -> List[ClientDescriptor]
    """Queues in the realm indexed for all-public-streams delivery."""
    return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client):
    # type: (ClientDescriptor) -> None
    """Index a descriptor into the per-user and per-realm lookup maps."""
    user_clients.setdefault(client.user_profile_id, []).append(client)
    # NOTE(review): narrowed queues are indexed alongside
    # all-public-streams queues here — presumably because a narrow can
    # include public streams the user hasn't subscribed to; confirm.
    if client.all_public_streams or client.narrow != []:
        realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(new_queue_data):
    # type: (MutableMapping[str, Any]) -> ClientDescriptor
    """Create, register, and index a new queue from request data.

    Queue ids embed the server generation so ids from a previous server
    process can never collide with ids from the current one.
    """
    global next_queue_id
    queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
    next_queue_id += 1
    new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
    client = ClientDescriptor.from_dict(new_queue_data)
    clients[queue_id] = client
    add_to_client_dicts(client)
    return client
def do_gc_event_queues(to_remove, affected_users, affected_realms):
    # type: (AbstractSet[str], AbstractSet[int], AbstractSet[int]) -> None
    """Remove the given queue ids from every registry and run gc hooks."""
    def filter_client_dict(client_dict, key):
        # type: (MutableMapping[int, List[ClientDescriptor]], int) -> None
        # Drop removed queues from the index; delete the key entirely
        # if no queues remain for it.
        if key not in client_dict:
            return
        new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
        if len(new_client_list) == 0:
            del client_dict[key]
        else:
            client_dict[key] = new_client_list
    for user_id in affected_users:
        filter_client_dict(user_clients, user_id)
    for realm_id in affected_realms:
        filter_client_dict(realm_clients_all_streams, realm_id)
    for id in to_remove:
        for cb in gc_hooks:
            # The final argument is last_for_client: True when this user
            # no longer has any registered queues at all.
            cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
        del clients[id]
def gc_event_queues():
    # type: () -> None
    """Periodic task: garbage-collect all idle event queues."""
    start = time.time()
    to_remove = set()  # type: Set[str]
    affected_users = set()  # type: Set[int]
    affected_realms = set()  # type: Set[int]
    for (id, client) in six.iteritems(clients):
        if client.idle(start):
            to_remove.add(id)
            affected_users.add(client.user_profile_id)
            affected_realms.add(client.realm_id)
    # We don't need to call e.g. finish_current_handler on the clients
    # being removed because they are guaranteed to be idle and thus
    # not have a current handler.
    do_gc_event_queues(to_remove, affected_users, affected_realms)
    logging.info(('Tornado removed %d idle event queues owned by %d users in %.3fs.' +
                  '  Now %d active queues, %s')
                 % (len(to_remove), len(affected_users), time.time() - start,
                    len(clients), handler_stats_string()))
    statsd.gauge('tornado.active_queues', len(clients))
    statsd.gauge('tornado.active_users', len(user_clients))
def dump_event_queues():
    # type: () -> None
    """Persist all event queues to disk (run at shutdown/reload)."""
    start = time.time()
    with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
        ujson.dump([(qid, client.to_dict()) for (qid, client) in six.iteritems(clients)],
                   stored_queues)
    logging.info('Tornado dumped %d event queues in %.3fs'
                 % (len(clients), time.time() - start))
def load_event_queues():
    # type: () -> None
    """Restore event queues dumped by a previous process, if any.

    Deserialization failures are logged but never fatal: losing queues
    just forces clients to re-register.
    """
    global clients
    start = time.time()
    # ujson chokes on bad input pretty easily.  We separate out the actual
    # file reading from the loading so that we don't silently fail if we get
    # bad input.
    try:
        with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
            json_data = stored_queues.read()
        try:
            clients = dict((qid, ClientDescriptor.from_dict(client))
                           for (qid, client) in ujson.loads(json_data))
        except Exception:
            logging.exception("Could not deserialize event queues")
    except (IOError, EOFError):
        # No dump file (e.g. first boot) — start with no queues.
        pass
    for client in six.itervalues(clients):
        # Put code for migrations due to event queue data format changes here
        add_to_client_dicts(client)
    logging.info('Tornado loaded %d event queues in %.3fs'
                 % (len(clients), time.time() - start))
def send_restart_events(immediate=False):
    # type: (bool) -> None
    """Notify every interested client that the server has restarted.

    With immediate=True, clients are asked to reload right away.
    """
    event = dict(type='restart', server_generation=settings.SERVER_GENERATION)  # type: Dict[str, Any]
    if immediate:
        event['immediate'] = True
    for client in six.itervalues(clients):
        if client.accepts_event(event):
            client.add_event(event.copy())
def setup_event_queue():
    # type: () -> None
    """Initialize queue persistence, GC scheduling, and restart events."""
    if not settings.TEST_SUITE:
        load_event_queues()
        atexit.register(dump_event_queues)
        # Make sure we dump event queues even if we exit via signal
        signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
        tornado.autoreload.add_reload_hook(dump_event_queues)  # type: ignore # TODO: Fix missing tornado.autoreload stub
    try:
        # Keep the consumed dump around for debugging; ignore if absent.
        os.rename(settings.JSON_PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.json.last")
    except OSError:
        pass
    # Set up event queue garbage collection
    ioloop = tornado.ioloop.IOLoop.instance()
    pc = tornado.ioloop.PeriodicCallback(gc_event_queues,
                                         EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
    pc.start()
    send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query):
    # type: (Mapping[str, Any]) -> Dict[str, Any]
    """Core of the get_events API, run inside Tornado.

    Returns a dict whose "type" is one of:
      - "response": events are ready (or dont_block); payload included.
      - "error": a JsonableError to report to the client.
      - "async": queue is empty and the client wants to long-poll; the
        handler has been attached and will be finished when events arrive.
    """
    queue_id = query["queue_id"]  # type: str
    dont_block = query["dont_block"]  # type: bool
    last_event_id = query["last_event_id"]  # type: int
    user_profile_id = query["user_profile_id"]  # type: int
    new_queue_data = query.get("new_queue_data")  # type: Optional[MutableMapping[str, Any]]
    user_profile_email = query["user_profile_email"]  # type: Text
    client_type_name = query["client_type_name"]  # type: Text
    handler_id = query["handler_id"]  # type: int
    try:
        was_connected = False
        orig_queue_id = queue_id
        extra_log_data = ""
        if queue_id is None:
            # No queue yet: register a new one (only valid with dont_block).
            if dont_block:
                client = allocate_client_descriptor(new_queue_data)
                queue_id = client.event_queue.id
            else:
                raise JsonableError(_("Missing 'queue_id' argument"))
        else:
            if last_event_id is None:
                raise JsonableError(_("Missing 'last_event_id' argument"))
            client = get_client_descriptor(queue_id)
            if client is None:
                raise BadEventQueueIdError(queue_id)
            if user_profile_id != client.user_profile_id:
                raise JsonableError(_("You are not authorized to get events from this queue"))
            # Acknowledge delivered events, then detach any stale handler.
            client.event_queue.prune(last_event_id)
            was_connected = client.finish_current_handler()
        if not client.event_queue.empty() or dont_block:
            response = dict(events=client.event_queue.contents(),
                            handler_id=handler_id)  # type: Dict[str, Any]
            if orig_queue_id is None:
                response['queue_id'] = queue_id
            if len(response["events"]) == 1:
                extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
                                                 response["events"][0]["type"])
            else:
                extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
            if was_connected:
                extra_log_data += " [was connected]"
            return dict(type="response", response=response, extra_log_data=extra_log_data)
        # After this point, dont_block=False, the queue is empty, and we
        # have a pre-existing queue, so we wait for new events.
        if was_connected:
            logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_email,
                                                                        client_type_name))
    except JsonableError as e:
        return dict(type="error", exception=e)
    client.connect_handler(handler_id, client_type_name)
    return dict(type="async")
# The following functions are called from Django
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
    # type: (requests.Response) -> Dict[str, Any]
    """Return the decoded JSON body of *resp*, handling both pre- and
    post-1.0 python-requests (.json property vs .json() method)."""
    if not requests_json_is_function:
        # Old requests: .json is a property holding the decoded body.
        return resp.json  # type: ignore # mypy trusts the stub, not the runtime type checking of this fn
    return resp.json()
def request_event_queue(user_profile, user_client, apply_markdown,
                        queue_lifespan_secs, event_types=None, all_public_streams=False,
                        narrow=None):
    # type: (UserProfile, Client, bool, int, Optional[Iterable[str]], bool, Optional[Iterable[Sequence[Text]]]) -> Optional[str]
    """Ask the Tornado server to register a new event queue.

    Returns the new queue id, or None when no Tornado server is
    configured.  Raises requests errors if Tornado is unreachable.
    """
    if narrow is None:
        # Use a None sentinel instead of a shared mutable default list.
        narrow = []
    if settings.TORNADO_SERVER:
        req = {'dont_block': 'true',
               'apply_markdown': ujson.dumps(apply_markdown),
               'all_public_streams': ujson.dumps(all_public_streams),
               'client': 'internal',
               'user_client': user_client.name,
               'narrow': ujson.dumps(narrow),
               'lifespan_secs': queue_lifespan_secs}
        if event_types is not None:
            req['event_types'] = ujson.dumps(event_types)
        try:
            resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
                                       auth=requests.auth.HTTPBasicAuth(
                                           user_profile.email, user_profile.api_key),
                                       params=req)
        except requests.adapters.ConnectionError:
            logging.error('Tornado server does not seem to be running, check %s '
                          'and %s for more information.' %
                          (settings.ERROR_FILE_LOG_PATH, "tornado.log"))
            raise requests.adapters.ConnectionError(
                "Django cannot connect to Tornado server (%s); try restarting" %
                (settings.TORNADO_SERVER))
        resp.raise_for_status()
        return extract_json_response(resp)['queue_id']
    return None
def get_user_events(user_profile, queue_id, last_event_id):
    # type: (UserProfile, str, int) -> List[Dict]
    """Fetch pending events for an existing queue from Tornado.

    Returns an empty list when no Tornado server is configured.
    """
    if settings.TORNADO_SERVER:
        resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
                                   auth=requests.auth.HTTPBasicAuth(
                                       user_profile.email, user_profile.api_key),
                                   params={'queue_id': queue_id,
                                           'last_event_id': last_event_id,
                                           'dont_block': 'true',
                                           'client': 'internal'})
        resp.raise_for_status()
        return extract_json_response(resp)['events']
    return []
# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id, message_id):
    # type: (int, int) -> Dict[str, Any]
    """Build the payload queued to the push/email notification workers
    for a message missed by an offline user."""
    notice = {
        "user_profile_id": user_profile_id,
        "message_id": message_id,
        "timestamp": time.time(),
    }
    return notice
def missedmessage_hook(user_profile_id, queue, last_for_client):
    # type: (int, ClientDescriptor, bool) -> None
    """GC hook: queue push/email notifications for unread mentions left
    in a user's final event queue when it is garbage-collected."""
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return
    message_ids_to_notify = []  # type: List[Dict[str, Any]]
    for event in queue.event_queue.contents():
        if not event['type'] == 'message' or not event['flags']:
            continue
        # Only unread mentions trigger notifications, and only via the
        # channels that have not already notified for this message.
        if 'mentioned' in event['flags'] and 'read' not in event['flags']:
            notify_info = dict(message_id=event['message']['id'])
            if not event.get('push_notified', False):
                notify_info['send_push'] = True
            if not event.get('email_notified', False):
                notify_info['send_email'] = True
            message_ids_to_notify.append(notify_info)
    for notify_info in message_ids_to_notify:
        msg_id = notify_info['message_id']
        notice = build_offline_notification(user_profile_id, msg_id)
        if notify_info.get('send_push', False):
            queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
        if notify_info.get('send_email', False):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
def receiver_is_idle(user_profile_id, realm_presences):
    # type: (int, Optional[Dict[int, Dict[Text, Dict[str, Any]]]]) -> bool
    """Decide whether a message recipient should be treated as idle.

    A user with no message-receiving event queues has no open zulip
    session, so we lean toward notifying them.
    """
    descriptors = get_client_descriptors_for_user(user_profile_id)
    off_zulip = not any(client.accepts_messages() for client in descriptors)

    # A recipient may not be in the sender's realm; we have no presence
    # information then (and getting it would need an extra db query), so we
    # simply don't try to guess whether this cross-realm recipient is idle.
    if realm_presences is None or user_profile_id not in realm_presences:
        return off_zulip

    # Find the newest "active" presence timestamp and compare it against the
    # activity expiry threshold.
    newest_active = None
    for client, status in six.iteritems(realm_presences[user_profile_id]):
        if status['status'] == 'active' and \
                (newest_active is None or status['timestamp'] > newest_active):
            newest_active = status['timestamp']

    if newest_active is None:
        idle = True
    else:
        active_datetime = timestamp_to_datetime(newest_active)
        # 140 seconds is consistent with presence.js:OFFLINE_THRESHOLD_SECS
        idle = timezone_now() - active_datetime > datetime.timedelta(seconds=140)

    return off_zulip or idle
def process_message_event(event_template, users):
    # type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
    """Fan a single message out to every client event queue that should see it.

    ``users`` carries one dict per recipient with the recipient's id, the
    message flags for that recipient, and notification preferences.  Also
    enqueues push/email missed-message notifications for idle recipients.
    """
    realm_presences = {int(k): v for k, v in event_template['presences'].items()}  # type: Dict[int, Dict[Text, Dict[str, Any]]]
    sender_queue_id = event_template.get('sender_queue_id', None)  # type: Optional[str]
    message_dict_markdown = event_template['message_dict_markdown']  # type: Dict[str, Any]
    message_dict_no_markdown = event_template['message_dict_no_markdown']  # type: Dict[str, Any]
    sender_id = message_dict_markdown['sender_id']  # type: int
    message_id = message_dict_markdown['id']  # type: int
    message_type = message_dict_markdown['type']  # type: str
    sending_client = message_dict_markdown['client']  # type: Text

    # To remove duplicate clients: maps queue ID to {'client': Client, 'flags': flags}
    send_to_clients = {}  # type: Dict[str, Dict[str, Any]]

    # Extra user-specific data to include in the event sent to each client.
    extra_user_data = {}  # type: Dict[int, Any]

    # Public-stream messages go to every queue in the realm that subscribes
    # to all streams; such entries have no per-user flags (flags=None).
    if 'stream_name' in event_template and not event_template.get("invite_only"):
        for client in get_client_descriptors_for_realm_all_streams(event_template['realm_id']):
            send_to_clients[client.event_queue.id] = {'client': client, 'flags': None}
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        for client in get_client_descriptors_for_user(user_profile_id):
            send_to_clients[client.event_queue.id] = {'client': client, 'flags': flags}
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

        # If the recipient was offline and the message was a single or group PM
        # to them, or they were @-notified, potentially notify more immediately.
        received_pm = message_type == "private" and user_profile_id != sender_id
        mentioned = 'mentioned' in flags
        idle = receiver_is_idle(user_profile_id, realm_presences)
        always_push_notify = user_data.get('always_push_notify', False)
        if (received_pm or mentioned) and (idle or always_push_notify):
            notice = build_offline_notification(user_profile_id, message_id)
            queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
            notified = dict(push_notified=True)  # type: Dict[str, bool]
            # Emails are sent only when the user is idle; a non-idle user who
            # reached here via always_push_notify gets a push but no email.
            if idle:
                # We require RabbitMQ to do this, as we can't call the email
                # handler from the Tornado process; with no rabbitmq support,
                # this publish is a no-op.
                queue_json_publish("missedmessage_emails", notice, lambda notice: None)
                notified['email_notified'] = True

            extra_user_data[user_profile_id] = notified

    for client_data in six.itervalues(send_to_clients):
        client = client_data['client']
        flags = client_data['flags']
        is_sender = client_data.get('is_sender', False)  # type: bool
        extra_data = extra_user_data.get(client.user_profile_id, None)  # type: Optional[Mapping[str, bool]]

        if not client.accepts_messages():
            # The actual check is the accepts_event() check below;
            # this line is just an optimization to avoid copying
            # message data unnecessarily.
            continue

        if client.apply_markdown:
            message_dict = message_dict_markdown
        else:
            message_dict = message_dict_no_markdown

        # Make sure Zephyr mirroring bots know whether stream is invite-only.
        if "mirror" in client.client_type_name and event_template.get("invite_only"):
            # Copy before mutating: message_dict is shared across clients.
            message_dict = message_dict.copy()
            message_dict["invite_only_stream"] = True

        if flags is not None:
            message_dict['is_mentioned'] = 'mentioned' in flags
        user_event = dict(type='message', message=message_dict, flags=flags)  # type: Dict[str, Any]
        if extra_data is not None:
            user_event.update(extra_data)

        if is_sender:
            # Echo the client-generated local id back so the sending client
            # can reconcile its locally-rendered message.
            local_message_id = event_template.get('local_id', None)
            if local_message_id is not None:
                user_event["local_message_id"] = local_message_id

        if not client.accepts_event(user_event):
            continue

        # The below prevents (Zephyr) mirroring loops.
        if ('mirror' in sending_client and
                sending_client.lower() == client.client_type_name.lower()):
            continue
        client.add_event(user_event)
def process_event(event, users):
    # type: (Mapping[str, Any], Iterable[int]) -> None
    """Deliver a generic event to every accepting queue of each user."""
    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if not client.accepts_event(event):
                continue
            # Copy so each queue owns its own (mutable) event dict.
            client.add_event(dict(event))
def process_userdata_event(event_template, users):
    # type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
    """Deliver an event template merged with per-user override fields."""
    for user_data in users:
        user_profile_id = user_data['id']
        # Shallow copy of the template (deep enough for our needs), then
        # layer in everything from user_data except the 'id' key itself.
        user_event = dict(event_template)
        for key, value in user_data.items():
            if key != "id":
                user_event[key] = value
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(user_event):
                client.add_event(user_event)
def process_notification(notice):
    # type: (Mapping[str, Any]) -> None
    """Dispatch a Tornado notification to the appropriate event processor."""
    event = notice['event']  # type: Mapping[str, Any]
    users = notice['users']  # type: Union[Iterable[int], Iterable[Mapping[str, Any]]]
    event_type = event['type']
    if event_type in ("update_message", "delete_message"):
        process_userdata_event(event, cast(Iterable[Mapping[str, Any]], users))
    elif event_type == "message":
        process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
    else:
        # All other event types carry a plain list of user ids.
        process_event(event, cast(Iterable[int], users))
# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(data):
    # type: (Mapping[str, Any]) -> None
    """Forward a notification to Tornado over HTTP, or handle it locally.

    Runs in the Django process.  We use JSON rather than bare form
    parameters so that we can represent different types and stay
    compatible with non-HTTP transports.
    """
    if not settings.TORNADO_SERVER or settings.RUNNING_INSIDE_TORNADO:
        # No separate Tornado process to talk to; process in-line.
        process_notification(data)
        return

    payload = dict(data=ujson.dumps(data),
                   secret=settings.SHARED_SECRET)
    requests_client.post(settings.TORNADO_SERVER + '/notify_tornado',
                         data=payload)
def send_notification(data):
    # type: (Mapping[str, Any]) -> None
    """Publish a notification onto the Tornado queue, falling back to
    direct HTTP delivery when no queue processor is available."""
    queue_json_publish("notify_tornado", data, send_notification_http)
def send_event(event, users):
    # type: (Mapping[str, Any], Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
    payload = dict(event=event, users=users)
    queue_json_publish("notify_tornado", payload, send_notification_http)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for fused batch norm operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
# (test-name suffix, data_format) pairs used to parameterize every test
# below over both the NHWC and NCHW tensor layouts.
DATA_FORMATS = (
    ("_data_format_NHWC", "NHWC"),
    ("_data_format_NCHW", "NCHW"),
)
class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
  """Tests for the XLA implementation of fused batch norm and its gradient.

  Reference values are always computed in NHWC with NumPy and converted to
  the parameterized target layout for comparison.
  """

  def _reference_training(self, x, scale, offset, epsilon, data_format):
    """NumPy reference for FusedBatchNorm in training mode.

    Returns (y, batch_mean, batch_var, bessel_corrected_var).  The
    corrected variance is needed because the TF op applies Bessel's
    correction to the variance it reports in training mode.
    """
    if data_format != "NHWC":
      raise ValueError("data_format must be NHWC, got %s." % data_format)
    x_square = x * x
    x_square_sum = np.sum(x_square, (0, 1, 2))
    x_sum = np.sum(x, axis=(0, 1, 2))
    # Number of elements reduced per channel (over N, H, W).
    element_count = np.size(x) / int(np.shape(x)[-1])
    mean = x_sum / element_count
    # Population variance: E[x^2] - E[x]^2.
    var = x_square_sum / element_count - mean * mean
    # Bessel's correction factor N / (N - 1), guarded for N == 1.
    factor = element_count / max(element_count - 1, 1)
    corrected_var = var * factor
    normalized = (x - mean) / np.sqrt(var + epsilon)
    return (normalized * scale + offset), mean, var, corrected_var

  def _reference_grad(self, x, grad_y, scale, mean, var, epsilon, data_format):
    """NumPy reference for FusedBatchNormGrad (training-mode formulas)."""
    # Use the following formulas to calculate gradients:
    # grad_scale =
    #   sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
    #
    # grad_offset = sum(output_y)
    #
    # grad_x =
    #   1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
    #   (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
    if data_format != "NHWC":
      raise ValueError("data_format must be NHWC, got %s." % data_format)
    grad_x = scale * (grad_y - np.mean(grad_y, axis=(0, 1, 2)) -
                      (x - mean) * np.mean(grad_y *
                                           (x - mean), axis=(0, 1, 2)) /
                      (var + epsilon)) / np.sqrt(var + epsilon)
    grad_scale = np.sum(
        grad_y * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2))
    grad_offset = np.sum(grad_y, axis=(0, 1, 2))
    return grad_x, grad_scale, grad_offset

  @parameterized.named_parameters(*DATA_FORMATS)
  def testInference(self, data_format):
    """Compares inference-mode output against the NumPy reference."""
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    epsilon = 0.001
    data_format_src = "NHWC"
    y_ref, mean_ref, var_ref, _ = self._reference_training(
        x_val, scale_val, offset_val, epsilon, data_format_src)

    with self.session() as sess, self.test_scope():
      # To avoid constant folding
      x_val_converted = test_utils.ConvertBetweenDataFormats(
          x_val, data_format_src, data_format)
      y_ref_converted = test_utils.ConvertBetweenDataFormats(
          y_ref, data_format_src, data_format)

      t_val = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="x")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      offset = array_ops.placeholder(
          np.float32, shape=scale_shape, name="offset")
      # Inference mode: feed the reference batch statistics directly.
      y, mean, variance = nn.fused_batch_norm(
          t_val,
          scale,
          offset,
          mean=mean_ref,
          variance=var_ref,
          epsilon=epsilon,
          data_format=data_format,
          is_training=False)

      y_val, _, _ = sess.run([y, mean, variance], {
          t_val: x_val_converted,
          scale: scale_val,
          offset: offset_val
      })
      self.assertAllClose(y_val, y_ref_converted, atol=1e-3)

  def _testLearning(self, use_gradient_checker, data_format):
    """Compares training-mode output (and optionally gradients) to reference.

    Args:
      use_gradient_checker: if True, additionally run the numeric gradient
        checker on the op before comparing forward outputs.
      data_format: "NHWC" or "NCHW".
    """
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    # NOTE(review): these two initial values are never fed to the op and are
    # overwritten by the sess.run below -- they appear to be dead stores.
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    epsilon = 0.001
    data_format_src = "NHWC"
    # When in training mode, fused_batchnorm applies an implicit Bessel's
    # correction. So we have to use the corrected variance here, as well.
    y_ref, mean_ref, _, var_ref_corr = self._reference_training(
        x_val, scale_val, offset_val, epsilon, data_format_src)

    with self.session() as sess, self.test_scope():
      # To avoid constant folding
      x_val_converted = test_utils.ConvertBetweenDataFormats(
          x_val, data_format_src, data_format)
      y_ref_converted = test_utils.ConvertBetweenDataFormats(
          y_ref, data_format_src, data_format)

      t_val = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="x")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      offset = array_ops.placeholder(
          np.float32, shape=scale_shape, name="offset")
      # Training mode: the op computes batch statistics itself.
      y, mean, var = nn.fused_batch_norm(
          t_val,
          scale,
          offset,
          mean=None,
          variance=None,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      # Check gradient.
      if use_gradient_checker:
        err = gradient_checker.compute_gradient_error(
            t_val,
            x_val_converted.shape,
            y,
            x_val_converted.shape,
            extra_feed_dict={
                t_val: x_val_converted,
                scale: scale_val,
                offset: offset_val
            })
        self.assertLess(err, 1e-3)

      y_val, mean_val, var_val = sess.run([y, mean, var], {
          t_val: x_val_converted,
          scale: scale_val,
          offset: offset_val
      })
      self.assertAllClose(mean_val, mean_ref, atol=1e-3)
      self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
      self.assertAllClose(var_val, var_ref_corr, atol=1e-3)

  @parameterized.named_parameters(*DATA_FORMATS)
  def testLearning(self, data_format):
    """Training-mode forward outputs only (no gradient checker)."""
    self._testLearning(False, data_format)

  @parameterized.named_parameters(*DATA_FORMATS)
  def testLearningWithGradientChecker(self, data_format):
    """Training-mode forward outputs plus numeric gradient check."""
    self._testLearning(True, data_format)

  @parameterized.named_parameters(*DATA_FORMATS)
  def testGradientTraining(self, data_format):
    """Compares training-mode FusedBatchNormGrad against the NumPy reference."""
    # TODO(b/64270657): Use gradient_checker here in addition to comparing with
    # this reference implementation.
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    grad_val = np.random.random_sample(x_shape).astype(np.float32)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    epsilon = 0.001

    # The TensorFlow FusedBatchNormGrad training operation takes two inputs with
    # implementation defined values. In theory the only correct value these
    # inputs are the corresponding reserve_space_{1|2} outputs from the
    # FusedBatchNorm training operation. However, in practice, we rely on the
    # first one being mean on {C|G}PU, and the second one being variance on CPU
    # and inverse(sqrt(variance + epsilon)) on GPU (we test this assumption
    # separately).
    reserve_space_1_val = mean_val
    if self.device == "XLA_GPU":
      reserve_space_2_val = np.reciprocal(np.sqrt(var_val + epsilon))
    else:
      reserve_space_2_val = var_val

    data_format_src = "NHWC"
    grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
        x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)

    with self.session() as sess, self.test_scope():
      grad_val_converted = test_utils.ConvertBetweenDataFormats(
          grad_val, data_format_src, data_format)
      x_val_converted = test_utils.ConvertBetweenDataFormats(
          x_val, data_format_src, data_format)
      grad_x_ref_converted = test_utils.ConvertBetweenDataFormats(
          grad_x_ref, data_format_src, data_format)

      grad = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="grad")
      x = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="x")
      reserve_space_1 = array_ops.placeholder(
          np.float32, shape=scale_shape, name="reserve_space_1")
      reserve_space_2 = array_ops.placeholder(
          np.float32, shape=scale_shape, name="reserve_space_2")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      grad_x, grad_scale, grad_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
          grad,
          x,
          scale,
          reserve_space_1,
          reserve_space_2,
          data_format=data_format,
          is_training=True)

      grad_x_val, grad_scale_val, grad_offset_val = sess.run(
          [grad_x, grad_scale, grad_offset], {
              grad: grad_val_converted,
              x: x_val_converted,
              reserve_space_1: reserve_space_1_val,
              reserve_space_2: reserve_space_2_val,
              scale: scale_val
          })

      self.assertAllClose(grad_x_val, grad_x_ref_converted, atol=1e-2)
      self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
      self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)

  @parameterized.named_parameters(*DATA_FORMATS)
  def testGradientInference(self, data_format):
    """Compares inference-mode FusedBatchNormGrad against a second op instance."""
    # TODO(b/64270657): Use gradient_checker here in addition to comparing with
    # this reference implementation.
    channel = 3
    x_shape = [2, 2, 6, channel]
    scale_shape = [channel]
    grad_val = np.random.random_sample(x_shape).astype(np.float32)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    data_format_src = "NHWC"

    # NOTE(review): the outer `with` already enters self.test_scope(), so the
    # "reference" op below is also built inside the XLA scope rather than on
    # the default device -- confirm this comparison targets what's intended.
    with self.session() as sess, self.test_scope():
      grad_val_converted = test_utils.ConvertBetweenDataFormats(
          grad_val, data_format_src, data_format)
      x_val_converted = test_utils.ConvertBetweenDataFormats(
          x_val, data_format_src, data_format)

      grad = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="grad")
      x = array_ops.placeholder(
          np.float32, shape=x_val_converted.shape, name="x")
      mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
      var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
      scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
      with self.test_scope():
        out = gen_nn_ops.fused_batch_norm_grad(
            grad,
            x,
            scale,
            mean,
            var,
            data_format=data_format,
            is_training=False)
        grad_x, grad_scale, grad_offset, _, _ = out
      ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
          grad, x, scale, mean, var, data_format=data_format, is_training=False)

      grad_x_val, grad_scale_val, grad_offset_val, = sess.run(
          [grad_x, grad_scale, grad_offset], {
              grad: grad_val_converted,
              x: x_val_converted,
              mean: mean_val,
              var: var_val,
              scale: scale_val
          })
      grad_x_ref, grad_scale_ref, grad_offset_ref, = sess.run(
          [ref_x, ref_scale, ref_offset], {
              grad: grad_val_converted,
              x: x_val_converted,
              mean: mean_val,
              var: var_val,
              scale: scale_val
          })

      self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
      self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
      self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
Model test set
"""
import unittest
from math import sqrt
from tr55.model import runoff_nrcs, \
simulate_cell_day, simulate_water_quality, \
create_unmodified_census, create_modified_census, \
simulate_day
from tr55.tablelookup import lookup_ki
# These data are taken directly from Table 2-1 of the revised (1986)
# TR-55 report.  The data in the PS array are various precipitation
# levels (inches), and each respective CNx array is the calculated runoff
# for that particular curve number with the given level of precipitation
# corresponding to that in PS.
PS = [1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0] # noqa
CN55 = [0.000, 0.000, 0.000, 0.000, 0.000, 0.020, 0.080, 0.190, 0.350, 0.530, 0.740, 0.980, 1.520, 2.120, 2.780, 3.490, 4.230, 5.000, 5.790, 6.610, 7.440, 8.290] # noqa
CN70 = [0.000, 0.030, 0.060, 0.110, 0.170, 0.240, 0.460, 0.710, 1.010, 1.330, 1.670, 2.040, 2.810, 3.620, 4.460, 5.330, 6.220, 7.130, 8.050, 8.980, 9.910, 10.85] # noqa
CN80 = [0.080, 0.150, 0.240, 0.340, 0.440, 0.560, 0.890, 1.250, 1.640, 2.040, 2.460, 2.890, 3.780, 4.690, 5.630, 6.570, 7.520, 8.480, 9.450, 10.42, 11.39, 12.37] # noqa
CN90 = [0.320, 0.460, 0.610, 0.760, 0.930, 1.090, 1.530, 1.980, 2.450, 2.920, 3.400, 3.880, 4.850, 5.820, 6.810, 7.790, 8.780, 9.770, 10.76, 11.76, 12.75, 13.74] # noqa
# INPUT and OUTPUT are data that were emailed to Azavea in a spreadsheet for
# testing the TR-55 model implementation.  The types were converted to NLCD
# strings based on the NLCD type number used by tables.py to calculate
# model results.
# Each INPUT entry is (precipitation, 'soil_group:land_cover'); entries are
# grouped by hydrologic soil group a-d, and pair positionally with OUTPUT.
INPUT = [
    (0.5, 'a:open_water'),
    (1, 'a:open_water'),
    (2, 'a:open_water'),
    (3.2, 'a:open_water'),
    (8, 'a:open_water'),
    (0.5, 'a:barren_land'),
    (1, 'a:barren_land'),
    (2, 'a:barren_land'),
    (3.2, 'a:barren_land'),
    (8, 'a:barren_land'),
    (0.5, 'a:developed_open'),
    (1, 'a:developed_open'),
    (2, 'a:developed_open'),
    (3.2, 'a:developed_open'),
    (8, 'a:developed_open'),
    (0.5, 'a:developed_low'),
    (1, 'a:developed_low'),
    (2, 'a:developed_low'),
    (3.2, 'a:developed_low'),
    (8, 'a:developed_low'),
    (0.5, 'a:developed_med'),
    (1, 'a:developed_med'),
    (2, 'a:developed_med'),
    (3.2, 'a:developed_med'),
    (8, 'a:developed_med'),
    (0.5, 'a:developed_high'),
    (1, 'a:developed_high'),
    (2, 'a:developed_high'),
    (3.2, 'a:developed_high'),
    (8, 'a:developed_high'),
    (0.5, 'a:deciduous_forest'),
    (0.5, 'a:evergreen_forest'),
    (0.5, 'a:mixed_forest'),
    (1, 'a:deciduous_forest'),
    (1, 'a:evergreen_forest'),
    (1, 'a:mixed_forest'),
    (2, 'a:deciduous_forest'),
    (2, 'a:evergreen_forest'),
    (2, 'a:mixed_forest'),
    (3.2, 'a:deciduous_forest'),
    (3.2, 'a:evergreen_forest'),
    (3.2, 'a:mixed_forest'),
    (8, 'a:deciduous_forest'),
    (8, 'a:evergreen_forest'),
    (8, 'a:mixed_forest'),
    (0.5, 'a:grassland'),
    (1, 'a:grassland'),
    (2, 'a:grassland'),
    (3.2, 'a:grassland'),
    (8, 'a:grassland'),
    (0.5, 'a:pasture'),
    (1, 'a:pasture'),
    (2, 'a:pasture'),
    (3.2, 'a:pasture'),
    (8, 'a:pasture'),
    (0.5, 'a:cultivated_crops'),
    (1, 'a:cultivated_crops'),
    (2, 'a:cultivated_crops'),
    (3.2, 'a:cultivated_crops'),
    (8, 'a:cultivated_crops'),
    (0.5, 'a:woody_wetlands'),
    (0.5, 'a:herbaceous_wetlands'),
    (1, 'a:woody_wetlands'),
    (1, 'a:herbaceous_wetlands'),
    (2, 'a:woody_wetlands'),
    (2, 'a:herbaceous_wetlands'),
    (3.2, 'a:woody_wetlands'),
    (3.2, 'a:herbaceous_wetlands'),
    (8, 'a:woody_wetlands'),
    (8, 'a:herbaceous_wetlands'),
    (0.5, 'b:open_water'),
    (1, 'b:open_water'),
    (2, 'b:open_water'),
    (3.2, 'b:open_water'),
    (8, 'b:open_water'),
    (0.5, 'b:barren_land'),
    (1, 'b:barren_land'),
    (2, 'b:barren_land'),
    (3.2, 'b:barren_land'),
    (8, 'b:barren_land'),
    (0.5, 'b:developed_open'),
    (1, 'b:developed_open'),
    (2, 'b:developed_open'),
    (3.2, 'b:developed_open'),
    (8, 'b:developed_open'),
    (0.5, 'b:developed_low'),
    (1, 'b:developed_low'),
    (2, 'b:developed_low'),
    (3.2, 'b:developed_low'),
    (8, 'b:developed_low'),
    (0.5, 'b:developed_med'),
    (1, 'b:developed_med'),
    (2, 'b:developed_med'),
    (3.2, 'b:developed_med'),
    (8, 'b:developed_med'),
    (0.5, 'b:developed_high'),
    (1, 'b:developed_high'),
    (2, 'b:developed_high'),
    (3.2, 'b:developed_high'),
    (8, 'b:developed_high'),
    (0.5, 'b:deciduous_forest'),
    (0.5, 'b:evergreen_forest'),
    (0.5, 'b:mixed_forest'),
    (1, 'b:deciduous_forest'),
    (1, 'b:evergreen_forest'),
    (1, 'b:mixed_forest'),
    (2, 'b:deciduous_forest'),
    (2, 'b:evergreen_forest'),
    (2, 'b:mixed_forest'),
    (3.2, 'b:deciduous_forest'),
    (3.2, 'b:evergreen_forest'),
    (3.2, 'b:mixed_forest'),
    (8, 'b:deciduous_forest'),
    (8, 'b:evergreen_forest'),
    (8, 'b:mixed_forest'),
    (0.5, 'b:grassland'),
    (1, 'b:grassland'),
    (2, 'b:grassland'),
    (3.2, 'b:grassland'),
    (8, 'b:grassland'),
    (0.5, 'b:pasture'),
    (1, 'b:pasture'),
    (2, 'b:pasture'),
    (3.2, 'b:pasture'),
    (8, 'b:pasture'),
    (0.5, 'b:cultivated_crops'),
    (1, 'b:cultivated_crops'),
    (2, 'b:cultivated_crops'),
    (3.2, 'b:cultivated_crops'),
    (8, 'b:cultivated_crops'),
    (0.5, 'b:woody_wetlands'),
    (0.5, 'b:herbaceous_wetlands'),
    (1, 'b:woody_wetlands'),
    (1, 'b:herbaceous_wetlands'),
    (2, 'b:woody_wetlands'),
    (2, 'b:herbaceous_wetlands'),
    (3.2, 'b:woody_wetlands'),
    (3.2, 'b:herbaceous_wetlands'),
    (8, 'b:woody_wetlands'),
    (8, 'b:herbaceous_wetlands'),
    (0.5, 'c:open_water'),
    (1, 'c:open_water'),
    (2, 'c:open_water'),
    (3.2, 'c:open_water'),
    (8, 'c:open_water'),
    (0.5, 'c:barren_land'),
    (1, 'c:barren_land'),
    (2, 'c:barren_land'),
    (3.2, 'c:barren_land'),
    (8, 'c:barren_land'),
    (0.5, 'c:developed_open'),
    (1, 'c:developed_open'),
    (2, 'c:developed_open'),
    (3.2, 'c:developed_open'),
    (8, 'c:developed_open'),
    (0.5, 'c:developed_low'),
    (1, 'c:developed_low'),
    (2, 'c:developed_low'),
    (3.2, 'c:developed_low'),
    (8, 'c:developed_low'),
    (0.5, 'c:developed_med'),
    (1, 'c:developed_med'),
    (2, 'c:developed_med'),
    (3.2, 'c:developed_med'),
    (8, 'c:developed_med'),
    (0.5, 'c:developed_high'),
    (1, 'c:developed_high'),
    (2, 'c:developed_high'),
    (3.2, 'c:developed_high'),
    (8, 'c:developed_high'),
    (0.5, 'c:deciduous_forest'),
    (0.5, 'c:evergreen_forest'),
    (0.5, 'c:mixed_forest'),
    (1, 'c:deciduous_forest'),
    (1, 'c:evergreen_forest'),
    (1, 'c:mixed_forest'),
    (2, 'c:deciduous_forest'),
    (2, 'c:evergreen_forest'),
    (2, 'c:mixed_forest'),
    (3.2, 'c:deciduous_forest'),
    (3.2, 'c:evergreen_forest'),
    (3.2, 'c:mixed_forest'),
    (8, 'c:deciduous_forest'),
    (8, 'c:evergreen_forest'),
    (8, 'c:mixed_forest'),
    (0.5, 'c:grassland'),
    (1, 'c:grassland'),
    (2, 'c:grassland'),
    (3.2, 'c:grassland'),
    (8, 'c:grassland'),
    (0.5, 'c:pasture'),
    (1, 'c:pasture'),
    (2, 'c:pasture'),
    (3.2, 'c:pasture'),
    (8, 'c:pasture'),
    (0.5, 'c:cultivated_crops'),
    (1, 'c:cultivated_crops'),
    (2, 'c:cultivated_crops'),
    (3.2, 'c:cultivated_crops'),
    (8, 'c:cultivated_crops'),
    (0.5, 'c:woody_wetlands'),
    (0.5, 'c:herbaceous_wetlands'),
    (1, 'c:woody_wetlands'),
    (1, 'c:herbaceous_wetlands'),
    (2, 'c:woody_wetlands'),
    (2, 'c:herbaceous_wetlands'),
    (3.2, 'c:woody_wetlands'),
    (3.2, 'c:herbaceous_wetlands'),
    (8, 'c:woody_wetlands'),
    (8, 'c:herbaceous_wetlands'),
    (0.5, 'd:open_water'),
    (1, 'd:open_water'),
    (2, 'd:open_water'),
    (3.2, 'd:open_water'),
    (8, 'd:open_water'),
    (0.5, 'd:barren_land'),
    (1, 'd:barren_land'),
    (2, 'd:barren_land'),
    (3.2, 'd:barren_land'),
    (8, 'd:barren_land'),
    (0.5, 'd:developed_open'),
    (1, 'd:developed_open'),
    (2, 'd:developed_open'),
    (3.2, 'd:developed_open'),
    (8, 'd:developed_open'),
    (0.5, 'd:developed_low'),
    (1, 'd:developed_low'),
    (2, 'd:developed_low'),
    (3.2, 'd:developed_low'),
    (8, 'd:developed_low'),
    (0.5, 'd:developed_med'),
    (1, 'd:developed_med'),
    (2, 'd:developed_med'),
    (3.2, 'd:developed_med'),
    (8, 'd:developed_med'),
    (0.5, 'd:developed_high'),
    (1, 'd:developed_high'),
    (2, 'd:developed_high'),
    (3.2, 'd:developed_high'),
    (8, 'd:developed_high'),
    (0.5, 'd:deciduous_forest'),
    (0.5, 'd:evergreen_forest'),
    (0.5, 'd:mixed_forest'),
    (1, 'd:deciduous_forest'),
    (1, 'd:evergreen_forest'),
    (1, 'd:mixed_forest'),
    (2, 'd:deciduous_forest'),
    (2, 'd:evergreen_forest'),
    (2, 'd:mixed_forest'),
    (3.2, 'd:deciduous_forest'),
    (3.2, 'd:evergreen_forest'),
    (3.2, 'd:mixed_forest'),
    (8, 'd:deciduous_forest'),
    (8, 'd:evergreen_forest'),
    (8, 'd:mixed_forest'),
    (0.5, 'd:grassland'),
    (1, 'd:grassland'),
    (2, 'd:grassland'),
    (3.2, 'd:grassland'),
    (8, 'd:grassland'),
    (0.5, 'd:pasture'),
    (1, 'd:pasture'),
    (2, 'd:pasture'),
    (3.2, 'd:pasture'),
    (8, 'd:pasture'),
    (0.5, 'd:cultivated_crops'),
    (1, 'd:cultivated_crops'),
    (2, 'd:cultivated_crops'),
    (3.2, 'd:cultivated_crops'),
    (8, 'd:cultivated_crops'),
    (0.5, 'd:woody_wetlands'),
    (0.5, 'd:herbaceous_wetlands'),
    (1, 'd:woody_wetlands'),
    (1, 'd:herbaceous_wetlands'),
    (2, 'd:woody_wetlands'),
    (2, 'd:herbaceous_wetlands'),
    (3.2, 'd:woody_wetlands'),
    (3.2, 'd:herbaceous_wetlands'),
    (8, 'd:woody_wetlands'),
    (8, 'd:herbaceous_wetlands')
]
# Expected model results, positionally paired with INPUT.
# NOTE(review): each tuple appears to be (runoff, et, inf) -- confirm the
# column order against the simulate_cell_day return value before relying on it.
OUTPUT = [
    (0.5, 0, 0),
    (1, 0, 0),
    (2, 0, 0),
    (3.2, 0, 0),
    (8, 0, 0),
    (0, 0, 0.5),
    (0, 0, 1),
    (0.4, 0, 1.6),
    (1.2, 0, 2),
    (5.3, 0, 2.7),
    (0, 0.1, 0.3),
    (0.1, 0.1, 0.7),
    (0.2, 0.1, 1.7),
    (0.7, 0.1, 2.3),
    (4.2, 0.1, 3.6),
    (0.1, 0.1, 0.3),
    (0.3, 0.1, 0.6),
    (0, 0.1, 1.9),
    (0.2, 0.1, 3),
    (2.4, 0.1, 5.6),
    (0.3, 0, 0.1),
    (0.7, 0, 0.3),
    (0.4, 0, 1.5),
    (1.2, 0, 2),
    (5.3, 0, 2.7),
    (0.5, 0, 0),
    (1, 0, 0),
    (1, 0, 1),
    (2.1, 0, 1.1),
    (6.7, 0, 1.3),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0, 0.1, 0.9),
    (0, 0.1, 0.9),
    (0, 0.1, 1.9),
    (0, 0.1, 1.9),
    (0, 0.1, 1.9),
    (0, 0.1, 3.1),
    (0, 0.1, 3.1),
    (0, 0.1, 3.1),
    (0.4, 0.1, 7.4),
    (0.4, 0.1, 7.4),
    (0.4, 0.1, 7.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0, 0.1, 1.9),
    (0, 0.1, 3.1),
    (0.4, 0.1, 7.5),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0, 0.1, 1.9),
    (0, 0.1, 3.1),
    (1.2, 0.1, 6.7),
    (0, 0.2, 0.3),
    (0, 0.2, 0.8),
    (0.2, 0.2, 1.6),
    (0.7, 0.2, 2.3),
    (4.1, 0.2, 3.7),
    (0.3, 0.2, 0),
    (0.3, 0.2, 0),
    (0.8, 0.2, 0),
    (0.8, 0.2, 0),
    (1.8, 0.2, 0),
    (1.8, 0.2, 0),
    (3, 0.2, 0),
    (3, 0.2, 0),
    (7.8, 0.2, 0),
    (7.8, 0.2, 0),
    (0.5, 0, 0),
    (1, 0, 0),
    (2, 0, 0),
    (3.2, 0, 0),
    (8, 0, 0),
    (0, 0, 0.5),
    (0.2, 0, 0.8),
    (0.8, 0, 1.2),
    (1.8, 0, 1.4),
    (6.3, 0, 1.7),
    (0, 0.1, 0.3),
    (0.1, 0.1, 0.7),
    (0.5, 0.1, 1.3),
    (1.3, 0.1, 1.7),
    (5.5, 0.1, 2.3),
    (0.1, 0.1, 0.3),
    (0.3, 0.1, 0.6),
    (0.2, 0.1, 1.7),
    (0.7, 0.1, 2.4),
    (4.2, 0.1, 3.7),
    (0.3, 0, 0.1),
    (0.7, 0, 0.3),
    (0.8, 0, 1.2),
    (1.8, 0, 1.4),
    (6.2, 0, 1.7),
    (0.5, 0, 0),
    (1, 0, 0),
    (1.2, 0, 0.8),
    (2.4, 0, 0.8),
    (7, 0, 0.9),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0, 0.1, 0.9),
    (0, 0.1, 0.9),
    (0, 0.1, 1.8),
    (0, 0.1, 1.8),
    (0, 0.1, 1.8),
    (0.3, 0.1, 2.8),
    (0.3, 0.1, 2.8),
    (0.3, 0.1, 2.8),
    (2.8, 0.1, 5.1),
    (2.8, 0.1, 5.1),
    (2.8, 0.1, 5.1),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0, 0.1, 1.9),
    (0.3, 0.1, 2.8),
    (2.8, 0.1, 5.1),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0.1, 0.1, 1.8),
    (0.4, 0.1, 2.6),
    (3.4, 0.1, 4.4),
    (0, 0.2, 0.3),
    (0.1, 0.2, 0.8),
    (0.5, 0.2, 1.3),
    (1.3, 0.2, 1.7),
    (5.4, 0.2, 2.4),
    (0.3, 0.2, 0),
    (0.3, 0.2, 0),
    (0.8, 0.2, 0),
    (0.8, 0.2, 0),
    (1.8, 0.2, 0),
    (1.8, 0.2, 0),
    (3, 0.2, 0),
    (3, 0.2, 0),
    (7.8, 0.2, 0),
    (7.8, 0.2, 0),
    (0.5, 0, 0),
    (1, 0, 0),
    (2, 0, 0),
    (3.2, 0, 0),
    (8, 0, 0),
    (0.1, 0, 0.4),
    (0.4, 0, 0.6),
    (1.2, 0, 0.8),
    (2.3, 0, 0.9),
    (6.9, 0, 1.1),
    (0, 0.1, 0.3),
    (0.1, 0.1, 0.7),
    (0.8, 0.1, 1),
    (1.8, 0.1, 1.2),
    (6.3, 0.1, 1.5),
    (0.1, 0.1, 0.3),
    (0.3, 0.1, 0.6),
    (0.5, 0.1, 1.4),
    (1.3, 0.1, 1.8),
    (5.5, 0.1, 2.4),
    (0.3, 0, 0.1),
    (0.7, 0, 0.3),
    (1.1, 0, 0.9),
    (2.2, 0, 1),
    (6.8, 0, 1.2),
    (0.5, 0, 0),
    (1, 0, 0),
    (1.4, 0, 0.6),
    (2.5, 0, 0.6),
    (7.3, 0, 0.7),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.8),
    (0, 0.1, 0.8),
    (0, 0.1, 0.8),
    (0.2, 0.1, 1.6),
    (0.2, 0.1, 1.6),
    (0.2, 0.1, 1.6),
    (0.8, 0.1, 2.2),
    (0.8, 0.1, 2.2),
    (0.8, 0.1, 2.2),
    (4.5, 0.1, 3.4),
    (4.5, 0.1, 3.4),
    (4.5, 0.1, 3.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0.2, 0.1, 1.6),
    (0.8, 0.1, 2.2),
    (4.5, 0.1, 3.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.9),
    (0.3, 0.1, 1.5),
    (1, 0.1, 2),
    (4.9, 0.1, 2.9),
    (0, 0.2, 0.3),
    (0.2, 0.2, 0.6),
    (0.8, 0.2, 1),
    (1.8, 0.2, 1.3),
    (6.2, 0.2, 1.6),
    (0.3, 0.2, 0),
    (0.3, 0.2, 0),
    (0.8, 0.2, 0),
    (0.8, 0.2, 0),
    (1.8, 0.2, 0),
    (1.8, 0.2, 0),
    (3, 0.2, 0),
    (3, 0.2, 0),
    (7.8, 0.2, 0),
    (7.8, 0.2, 0),
    (0.5, 0, 0),
    (1, 0, 0),
    (2, 0, 0),
    (3.2, 0, 0),
    (8, 0, 0),
    (0.1, 0, 0.4),
    (0.5, 0, 0.5),
    (1.4, 0, 0.6),
    (2.5, 0, 0.7),
    (7.3, 0, 0.7),
    (0, 0.1, 0.3),
    (0.1, 0.1, 0.7),
    (1, 0.1, 0.8),
    (2.1, 0.1, 1),
    (6.7, 0.1, 1.2),
    (0.1, 0.1, 0.3),
    (0.3, 0.1, 0.6),
    (0.7, 0.1, 1.2),
    (1.7, 0.1, 1.4),
    (6.1, 0.1, 1.8),
    (0.3, 0, 0.1),
    (0.7, 0, 0.3),
    (1.2, 0, 0.7),
    (2.4, 0, 0.8),
    (7, 0, 0.9),
    (0.5, 0, 0),
    (1, 0, 0),
    (1.5, 0, 0.5),
    (2.6, 0, 0.5),
    (7.4, 0, 0.6),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.4),
    (0, 0.1, 0.8),
    (0, 0.1, 0.8),
    (0, 0.1, 0.8),
    (0.4, 0.1, 1.4),
    (0.4, 0.1, 1.4),
    (0.4, 0.1, 1.4),
    (1.2, 0.1, 1.8),
    (1.2, 0.1, 1.8),
    (1.2, 0.1, 1.8),
    (5.3, 0.1, 2.6),
    (5.3, 0.1, 2.6),
    (5.3, 0.1, 2.6),
    (0, 0.1, 0.4),
    (0, 0.1, 0.8),
    (0.4, 0.1, 1.4),
    (1.2, 0.1, 1.9),
    (5.3, 0.1, 2.6),
    (0, 0.1, 0.4),
    (0.1, 0.1, 0.8),
    (0.6, 0.1, 1.3),
    (1.4, 0.1, 1.7),
    (5.6, 0.1, 2.2),
    (0, 0.2, 0.3),
    (0.3, 0.2, 0.5),
    (1, 0.2, 0.8),
    (2.1, 0.2, 0.9),
    (6.7, 0.2, 1.1),
    (0.3, 0.2, 0),
    (0.3, 0.2, 0),
    (0.8, 0.2, 0),
    (0.8, 0.2, 0),
    (1.8, 0.2, 0),
    (1.8, 0.2, 0),
    (3, 0.2, 0),
    (3, 0.2, 0),
    (7.8, 0.2, 0),
    (7.8, 0.2, 0)
]
# Sample land-use census used by TestModel.test_day_1.  'distribution'
# maps "soil:land_use" tile strings to cell counts; each entry in
# 'modifications' overlays a change ("soil:land_use:bmp" pattern, with
# empty segments meaning "unchanged") onto a subset of those cells.
CENSUS_1 = {
    'cell_count': 147,
    'distribution': {
        'c:developed_high': {
            'cell_count': 42
        },
        'a:deciduous_forest': {
            'cell_count': 72
        },
        'd:developed_med': {
            'cell_count': 33
        }
    },
    'modifications': [
        {
            # Apply the no_till BMP, keeping soil and land use.
            'change': '::no_till',
            'cell_count': 30,
            'distribution': {
                'c:developed_high': {
                    'cell_count': 20
                },
                'd:developed_med': {
                    'cell_count': 10
                }
            }
        },
        {
            # Reclassify to soil type d / barren_land, no BMP.
            'change': 'd:barren_land:',
            'cell_count': 5,
            'distribution': {
                'a:deciduous_forest': {
                    'cell_count': 5
                }
            },
        }
    ]
}
# Expected result of simulate_day(CENSUS_1, 2) in TestModel.test_day_1.
# 'unmodified' is the baseline run; 'modified' applies CENSUS_1's
# modifications, with per-tile sub-distributions for the changed cells.
DAY_OUTPUT_1 = {
    'unmodified': {
        'inf': 1.4762466686413165,
        'cell_count': 147,
        'tp': 0.048497869127119175,
        'tn': 0.3010544583784289,
        'runoff': 0.4408688415627653,
        'et': 0.08288448979591835,
        'distribution': {
            'c:developed_high': {
                'cell_count': 42,
                'tp': 0.03354942097300307,
                'tn': 0.21201370198217218,
                'runoff': 0.9904463051399999,
                'et': 0.01242,
                'inf': 0.9971336948599999,
                'bod': 28.889779171197087,
                'tss': 5.892117058383664
            },
            'a:deciduous_forest': {
                'cell_count': 72,
                'tp': 0.0,
                'tn': 0.0,
                'runoff': 0.0,
                'et': 0.14489999999999997,
                'inf': 1.8550999999999997,
                'bod': 0.0,
                'tss': 0.0
            },
            'd:developed_med': {
                'cell_count': 33,
                'tp': 0.014948448154116101,
                'tn': 0.08904075639625678,
                'runoff': 0.7033022695105,
                'et': 0.037259999999999995,
                'inf': 1.2594377304895001,
                'bod': 15.33840767118,
                'tss': 1.832809730200322
            }
        },
        'bod': 44.228186842377085,
        'tss': 7.724926788583986
    },
    'modified': {
        'inf': 1.4364676745914813,
        'cell_count': 147,
        'tp': 0.04396329106364022,
        'tn': 0.27220178808403017,
        'runoff': 0.4517804886738248,
        'et': 0.11175183673469387,
        'distribution': {
            'c:developed_high': {
                'inf': 1.077061870392374,
                'cell_count': 42,
                'tp': 0.02803732401552826,
                'tn': 0.1771803114870189,
                'runoff': 0.827718129607626,
                'et': 0.09522,
                # Nested distribution: 20 of the 42 cells carry no_till.
                'distribution': {
                    'c:developed_high': {
                        'cell_count': 22,
                        'tp': 0.017573506223953986,
                        'tn': 0.11105479627637589,
                        'runoff': 0.99044630514,
                        'et': 0.012419999999999999,
                        'inf': 0.99713369486,
                        'bod': 15.132741470627044,
                        'tss': 3.086347030581919
                    },
                    'c:developed_high:no_till': {
                        'cell_count': 20,
                        'tp': 0.010463817791574277,
                        'tn': 0.06612551521064301,
                        'runoff': 0.6487171365220146,
                        'et': 0.1863,
                        'inf': 1.1649828634779853,
                        'bod': 9.010509764966741,
                        'tss': 1.8377079996452328
                    }
                },
                'bod': 24.143251235593787,
                'tss': 4.9240550302271515
            },
            'a:deciduous_forest': {
                'inf': 1.7681957171334695,
                'cell_count': 72,
                'tp': 3.9101893977641545e-05,
                'tn': 0.0003910189397764155,
                'runoff': 0.09696678286653043,
                'et': 0.13483749999999997,
                # 5 of the 72 cells reclassified to d:barren_land.
                'distribution': {
                    'a:deciduous_forest': {
                        'cell_count': 67,
                        'tp': 0.0,
                        'tn': 0.0,
                        'runoff': 0.0,
                        'et': 0.14489999999999997,
                        'inf': 1.8551,
                        'bod': 0.0,
                        'tss': 0.0
                    },
                    'd:barren_land:': {
                        'cell_count': 5,
                        'tp': 3.9101893977641545e-05,
                        'tn': 0.0003910189397764155,
                        'runoff': 1.3963216732780384,
                        'et': 0.0,
                        'inf': 0.6036783267219616,
                        'bod': 5.1614500050486845,
                        'tss': 0.03910189397764155
                    }
                },
                'bod': 5.1614500050486845,
                'tss': 0.03910189397764155
            },
            'd:developed_med': {
                'inf': 1.1701229689350983,
                'cell_count': 33,
                'tp': 0.015886865154134316,
                'tn': 0.09463045765723485,
                'runoff': 0.7474533947012655,
                'et': 0.08242363636363635,
                # 10 of the 33 cells carry no_till.
                'distribution': {
                    'd:developed_med:no_till': {
                        'cell_count': 10,
                        'tp': 0.005468249773992795,
                        'tn': 0.03257174865378317,
                        'runoff': 0.8490009826400262,
                        'et': 0.1863,
                        'inf': 0.9646990173599737,
                        'bod': 5.610899768096954,
                        'tss': 0.6704549722895514
                    },
                    'd:developed_med': {
                        'cell_count': 23,
                        'tp': 0.010418615380141523,
                        'tn': 0.06205870900345169,
                        'runoff': 0.7033022695104999,
                        'et': 0.037259999999999995,
                        'inf': 1.2594377304895001,
                        'bod': 10.690405346579997,
                        'tss': 1.2774128422608306
                    }
                },
                'bod': 16.30130511467695,
                'tss': 1.947867814550382
            }
        },
        'bod': 45.60600635531942,
        'tss': 6.9110247387551755
    }
}
# Sample census used by TestModel.test_day_2: four single-cell tiles,
# three of which each receive a different BMP modification.
CENSUS_2 = {
    'cell_count': 4,
    'distribution': {
        'd:developed_med': {'cell_count': 1},
        'c:developed_high': {'cell_count': 1},
        'a:deciduous_forest': {'cell_count': 1},
        'b:pasture': {'cell_count': 1}
    },
    'modifications': [
        {
            'change': '::no_till',
            'cell_count': 1,
            'distribution': {
                'b:pasture': {'cell_count': 1}
            }
        },
        {
            'change': '::cluster_housing',
            'cell_count': 1,
            'distribution': {
                'd:developed_med': {'cell_count': 1}
            }
        },
        {
            'change': '::rain_garden',
            'cell_count': 1,
            'distribution': {
                'c:developed_high': {'cell_count': 1}
            }
        }
    ]
}
# Expected result of simulate_day(CENSUS_2, 2) in TestModel.test_day_2.
# Note the zero-cell entries in the nested distributions: a modification
# that consumes all cells of a type leaves an empty placeholder behind.
DAY_OUTPUT_2 = {
    'unmodified': {
        'inf': 1.4785857682509507,
        'cell_count': 4,
        'tp': 0.0013746500037446765,
        'tn': 0.008688160939430185,
        'runoff': 0.4417192317490494,
        'et': 0.07969499999999999,
        'distribution': {
            'c:developed_high': {
                'cell_count': 1,
                'tp': 0.0007987957374524541,
                'tn': 0.005047945285289814,
                'runoff': 0.99044630514,
                'et': 0.012419999999999999,
                'inf': 0.99713369486,
                'bod': 0.687851885028502,
                'tss': 0.14028850139008725
            },
            'a:deciduous_forest': {
                'cell_count': 1,
                'tp': 0.0,
                'tn': 0.0,
                'runoff': 0.0,
                'et': 0.14489999999999997,
                'inf': 1.8551,
                'bod': 0.0,
                'tss': 0.0
            },
            'b:pasture': {
                'cell_count': 1,
                'tp': 0.00012287098889476473,
                'tn': 0.0009420109148598631,
                'runoff': 0.0731283523456977,
                'et': 0.12419999999999999,
                'inf': 1.8026716476543023,
                'bod': 0.04095699629825491,
                'tss': 0.020478498149127455
            },
            'd:developed_med': {
                'cell_count': 1,
                'tp': 0.0004529832773974576,
                'tn': 0.002698204739280508,
                'runoff': 0.7033022695105,
                'et': 0.037259999999999995,
                'inf': 1.2594377304895001,
                'bod': 0.46480023245999996,
                'tss': 0.05553968879394915
            }
        },
        'bod': 1.1936091137867568,
        'tss': 0.21630668833316385
    },
    'modified': {
        'inf': 1.4978906201690463,
        'cell_count': 4,
        'tp': 0.0014947940356953506,
        'tn': 0.010093182575177441,
        'runoff': 0.3934343798309537,
        'et': 0.108675,
        'distribution': {
            'c:developed_high': {
                'inf': 1.0641101843915999,
                'cell_count': 1,
                'tp': 0.0007414402317520271,
                'tn': 0.004685490353432948,
                'runoff': 0.9193298156084,
                'et': 0.01656,
                'distribution': {
                    'c:developed_high': {
                        'cell_count': 0,
                        'runoff': 0,
                        'et': 0,
                        'inf': 0
                    },
                    'c:developed_high:rain_garden': {
                        'cell_count': 1,
                        'tp': 0.0007414402317520271,
                        'tn': 0.004685490353432948,
                        'runoff': 0.9193298156084,
                        'et': 0.01656,
                        'inf': 1.0641101843915999,
                        'bod': 0.6384624217864677,
                        'tss': 0.13021544070144975
                    }
                },
                'bod': 0.6384624217864677,
                'tss': 0.13021544070144975
            },
            'a:deciduous_forest': {
                'cell_count': 1,
                'tp': 0.0,
                'tn': 0.0,
                'runoff': 0.0,
                'et': 0.14489999999999997,
                'inf': 1.8551,
                'distribution': {
                    'a:deciduous_forest': {
                        'cell_count': 1,
                        'tp': 0.0,
                        'tn': 0.0,
                        'runoff': 0.0,
                        'et': 0.14489999999999997,
                        'inf': 1.8551,
                        'bod': 0.0,
                        'tss': 0.0
                    }
                },
                'bod': 0.0,
                'tss': 0.0
            },
            'b:pasture': {
                'inf': 1.4934093771285855,
                'cell_count': 1,
                'tp': 0.0005381555074547796,
                'tn': 0.004125858890486643,
                'runoff': 0.32029062287141463,
                'et': 0.1863,
                'distribution': {
                    'b:pasture:no_till': {
                        'cell_count': 1,
                        'tp': 0.0005381555074547796,
                        'tn': 0.004125858890486643,
                        'runoff': 0.32029062287141463,
                        'et': 0.1863,
                        'inf': 1.4934093771285855,
                        'bod': 0.1793851691515932,
                        'tss': 0.0896925845757966
                    },
                    'b:pasture': {
                        'cell_count': 0,
                        'runoff': 0,
                        'et': 0,
                        'inf': 0
                    }
                },
                'bod': 0.1793851691515932,
                'tss': 0.0896925845757966
            },
            'd:developed_med': {
                'inf': 1.5789429191559998,
                'cell_count': 1,
                'tp': 0.000215198296488544,
                'tn': 0.001281833331257849,
                'runoff': 0.3341170808440001,
                'et': 0.08693999999999999,
                'distribution': {
                    'd:developed_med:cluster_housing': {
                        'cell_count': 1,
                        'tp': 0.000215198296488544,
                        'tn': 0.001281833331257849,
                        'runoff': 0.3341170808440001,
                        'et': 0.08693999999999999,
                        'inf': 1.5789429191559998,
                        'bod': 0.220812165092593,
                        'tss': 0.026385182439030177
                    },
                    'd:developed_med': {
                        'cell_count': 0,
                        'runoff': 0,
                        'et': 0,
                        'inf': 0
                    }
                },
                'bod': 0.220812165092593,
                'tss': 0.026385182439030177
            }
        },
        'bod': 1.0386597560306539,
        'tss': 0.24629320771627652
    }
}
def simulate(precip, tile_string):
    """Run a single-cell, single-day simulation for one tile.

    *tile_string* has the form "soil:land_use[:bmp]"; its land-use
    segment selects the coefficient returned by lookup_ki, which scales
    the fixed 0.207 factor passed to simulate_cell_day for one cell.
    """
    parts = tile_string.split(':')
    scaled_ki = 0.207 * lookup_ki(parts[1])
    return simulate_cell_day(precip, scaled_ki, tile_string, 1)
def average(l):
    """Return the arithmetic mean of the numbers in *l*.

    Uses the built-in sum() instead of reduce() with a lambda: it is the
    idiomatic spelling and keeps this helper working under Python 3,
    where reduce is no longer a builtin.  Raises ZeroDivisionError for
    an empty sequence.
    """
    return sum(l) / len(l)
class TestModel(unittest.TestCase):
    """
    Model test set.

    Exercises the runoff equation, the single-tile daily simulation,
    census transformations, the water-quality aggregation, and the
    end-to-end simulate_day entry point.
    """
    def test_nrcs(self):
        """
        Test the implementation of the runoff equation.
        """
        # This pair has CN=55
        runoffs = [round(runoff_nrcs(precip, 0.0, 'b', 'deciduous_forest'), 2)
                   for precip in PS]
        # Low curve number and low P cause too-high runoff
        self.assertEqual(runoffs[4:], CN55[4:])

        # This pair has CN=70
        runoffs = [round(runoff_nrcs(precip, 0.0, 'c', 'deciduous_forest'), 2)
                   for precip in PS]
        self.assertEqual(runoffs[1:], CN70[1:])

        # This pair has CN=80
        runoffs = [round(runoff_nrcs(precip, 0.0, 'd', 'pasture'), 2)
                   for precip in PS]
        self.assertEqual(runoffs, CN80)

        # This pair has CN=90
        runoffs = [round(runoff_nrcs(precip, 0.0, 'c', 'developed_med'), 2)
                   for precip in PS]
        self.assertEqual(runoffs, CN90)

    def test_simulate_day_1(self):
        """
        Test the tile simulation using sample input/output.
        """
        # The number 0.04 is not very meaningful, this test just
        # attempts to give some idea about the mean error of the three
        # quantities -- relative to precipitation -- as compared to
        # the sample output that was emailed to us.
        def similar(incoming, expected):
            precip, tile_string = incoming
            results = simulate(precip, tile_string + ':')
            results = (results['runoff-vol'],
                       results['et-vol'],
                       results['inf-vol'])
            lam = lambda x, y: abs(x - y) / precip
            # Build a concrete list (not a lazy map object) so that
            # average() can take len() of it on Python 3 as well.
            me = average([lam(x, y) for x, y in zip(results, expected)])
            # Precipitation levels <= 2 inches are known to be
            # problematic. It is unclear why the 'barren_land' type is
            # giving trouble on soil types C and D.
            if precip > 2 and tile_string != 'c:barren_land' \
               and tile_string != 'd:barren_land':
                self.assertTrue(me < 0.04, tile_string + ' ' + str(me))
        # Use an explicit loop instead of map() for the side effect:
        # under Python 3, map() is lazy and the checks would never run.
        for incoming, expected in zip(INPUT, OUTPUT):
            similar(incoming, expected)

    def test_simulate_day_2(self):
        """
        Another test of the tile simulation using sample input/output.
        """
        # Test the RMSE of the runoff levels produced by the tile
        # simulation against values sample input/output.  The number
        # 0.13 is not very meaningful, this test just attempts to put
        # a bound on the deviation between the current output and the
        # sample output that was mailed to us.
        results = [simulate(precip, tile_string + ':')['runoff-vol'] / precip
                   for precip, tile_string in INPUT
                   if precip > 2 and tile_string != 'c:barren_land' and
                   tile_string != 'd:barren_land']
        expected = [OUTPUT[i][0] / INPUT[i][0]
                    for i in range(len(INPUT))
                    if INPUT[i][0] > 2 and INPUT[i][1] != 'c:barren_land' and
                    INPUT[i][1] != 'd:barren_land']
        lam = lambda x, y: pow((x - y), 2)
        # List comprehension rather than map() so average() can take a
        # len() on Python 3 too.
        rmse = sqrt(average([lam(x, y) for x, y in zip(results, expected)]))
        self.assertTrue(rmse < 0.13)

    def test_simulate_day_3(self):
        """
        Daily simulation.
        """
        # Volumes must scale linearly with the cell count.
        result1 = simulate_cell_day(42, 93, 'a:barren_land:', 1)
        result2 = simulate_cell_day(42, 93, 'a:barren_land:', 2)
        self.assertEqual(result1['runoff-vol'] * 2, result2['runoff-vol'])

    def test_create_unmodified_census(self):
        """
        Test create_unmodified_census.
        """
        census = {
            "cell_count": 2,
            "distribution": {
                "a:barren_land": {"cell_count": 1},
                "a:open_water": {"cell_count": 1}
            },
            "modifications": [
                {
                    "change": "::cluster_housing",
                    "cell_count": 1,
                    "distribution": {
                        "a:barren_land": {"cell_count": 1}
                    }
                }
            ]
        }
        result = create_unmodified_census(census)
        # The unmodified census is the input with 'modifications' dropped.
        census.pop("modifications", None)
        self.assertEqual(census, result)

    def test_create_modified_census_1(self):
        """
        create_modified_census from a census w/o modifications.
        """
        census = {
            "cell_count": 5,
            "distribution": {
                "a:barren_land": {"cell_count": 3},
                "a:open_water": {"cell_count": 2}
            }
        }
        expected = {
            "cell_count": 5,
            "distribution": {
                "a:barren_land": {
                    "cell_count": 3,
                    "distribution": {"a:barren_land": {"cell_count": 3}}
                },
                "a:open_water": {
                    "cell_count": 2,
                    "distribution": {"a:open_water": {"cell_count": 2}}
                }
            }
        }
        actual = create_modified_census(census)
        self.assertEqual(actual, expected)

    def test_create_modified_census_2(self):
        """
        create_modified_census from a census w/ trivial modifications.
        """
        census = {
            "cell_count": 3,
            "distribution": {
                "a:barren_land": {"cell_count": 2},
                "a:open_water": {"cell_count": 1}
            },
            "modifications": []
        }
        expected = {
            "cell_count": 3,
            "distribution": {
                "a:barren_land": {
                    "cell_count": 2,
                    "distribution": {"a:barren_land": {"cell_count": 2}}
                },
                "a:open_water": {
                    "cell_count": 1,
                    "distribution": {"a:open_water": {"cell_count": 1}}
                }
            }
        }
        actual = create_modified_census(census)
        self.assertEqual(actual, expected)

    def test_create_modified_census_3(self):
        """
        create_modified_census with non-trivial modifications.
        """
        census = {
            "cell_count": 144,
            "distribution": {
                "a:barren_land": {"cell_count": 55},
                "a:open_water": {"cell_count": 89}
            },
            "modifications": [
                {
                    "change": "::cluster_housing",
                    "cell_count": 34,
                    "distribution": {
                        "a:barren_land": {"cell_count": 34}
                    }
                }
            ]
        }
        expected = {
            "cell_count": 144,
            "distribution": {
                "a:barren_land": {
                    "cell_count": 55,
                    "distribution": {
                        "a:barren_land:cluster_housing": {"cell_count": 34},
                        "a:barren_land": {"cell_count": 21}
                    }
                },
                "a:open_water": {
                    "cell_count": 89,
                    "distribution": {
                        "a:open_water": {"cell_count": 89}
                    }
                }
            }
        }
        actual = create_modified_census(census)
        self.assertEqual(actual, expected)

    def test_create_modified_census_4(self):
        """
        create_modified_census with different types of changes.
        """
        census = {
            "distribution": {
                "a:developed_low": {
                    "cell_count": 3
                }
            },
            "cell_count": 3,
            "modifications": [
                {
                    "distribution": {
                        "a:developed_low": {
                            "cell_count": 1
                        }
                    },
                    "cell_count": 1,
                    "change": ":deciduous_forest:cluster_housing"
                },
                {
                    "distribution": {
                        "a:developed_low": {
                            "cell_count": 1
                        }
                    },
                    "cell_count": 1,
                    "change": ":deciduous_forest:"
                },
                {
                    "distribution": {
                        "a:developed_low": {
                            "cell_count": 1
                        }
                    },
                    "cell_count": 1,
                    "change": "::cluster_housing"
                },
            ]
        }
        expected = set([
            'a:deciduous_forest:',
            'a:developed_low',
            'a:deciduous_forest:cluster_housing',
            'a:developed_low:cluster_housing'])
        modified = create_modified_census(census)
        distrib = modified['distribution']['a:developed_low']['distribution']
        actual = set(distrib.keys())
        self.assertEqual(actual, expected)

    def test_simulate_water_quality_1(self):
        """
        Test the water quality simulation.
        """
        census = {
            "cell_count": 5,
            "distribution": {
                "a:barren_land": {"cell_count": 3},
                "a:open_water": {"cell_count": 2}
            }
        }

        def fn(cell, cell_count):
            return simulate_cell_day(5, 0.207, cell, cell_count)

        simulate_water_quality(census, 93, fn)
        left = census['distribution']['a:barren_land']
        right = census['distribution']['a:open_water']
        # Every aggregate written to the top level must equal the sum
        # of the per-tile values.
        for key in set(census.keys()) - set(['distribution']):
            self.assertEqual(left[key] + right[key], census[key])

    def test_simulate_water_quality_2(self):
        """
        Test the water quality simulation in the presence of modifications.
        """
        census = {
            "cell_count": 3,
            "distribution": {
                "a:barren_land": {"cell_count": 2},
                "a:open_water": {"cell_count": 1}
            },
            "modifications": [
                {
                    "change": "d:developed_med:",
                    "cell_count": 1,
                    "distribution": {
                        "a:barren_land": {"cell_count": 1}
                    }
                }
            ]
        }
        census1 = create_modified_census(census)
        # census2 is the equivalent flat census after applying the
        # modification by hand; both must simulate identically.
        census2 = {
            "cell_count": 3,
            "distribution": {
                "a:barren_land": {"cell_count": 1},
                "d:developed_med": {"cell_count": 1},
                "a:open_water": {"cell_count": 1}
            }
        }

        def fn(cell, cell_count):
            return simulate_cell_day(5, 0.207, cell, cell_count)

        simulate_water_quality(census1, 93, fn)
        simulate_water_quality(census2, 93, fn)
        for key in set(census1.keys()) - set(['distribution']):
            self.assertEqual(census1[key], census2[key])

    def test_simulate_water_quality_precolumbian(self):
        """
        Test the water quality simulation in Pre-Columbian times.
        """
        census1 = {
            "cell_count": 8,
            "distribution": {
                "a:developed_med": {"cell_count": 1},
                "b:no_till": {"cell_count": 1},
                "c:pasture": {"cell_count": 1},
                "d:cultivated_crops": {"cell_count": 1},
                "a:open_water": {"cell_count": 1},
                "b:shrub": {"cell_count": 1},
                "c:barren_land": {"cell_count": 1},
                "d:developed_open": {"cell_count": 1}
            }
        }
        # census2 is census1 with human land uses mapped back to their
        # Pre-Columbian equivalents by hand.
        census2 = {
            "cell_count": 8,
            "distribution": {
                "a:mixed_forest": {"cell_count": 1},
                "b:mixed_forest": {"cell_count": 1},
                "c:mixed_forest": {"cell_count": 1},
                "d:mixed_forest": {"cell_count": 2},
                "a:open_water": {"cell_count": 1},
                "b:shrub": {"cell_count": 1},
                "c:barren_land": {"cell_count": 1}
            }
        }
        # NOTE(review): dict.copy() is shallow, so census3 shares its
        # 'distribution' sub-dicts with census2 -- confirm that
        # simulate_water_quality only adds top-level aggregates here.
        census3 = census2.copy()

        def fn(cell, cell_count):
            return simulate_cell_day(7, 0.107, cell, cell_count)

        simulate_water_quality(census1, 93, fn, precolumbian=True)
        simulate_water_quality(census2, 93, fn, precolumbian=True)
        simulate_water_quality(census3, 93, fn, precolumbian=False)

        for key in set(census1.keys()) - set(['distribution']):
            self.assertAlmostEqual(census1[key], census2[key])

        for key in set(census1.keys()) - set(['distribution']):
            self.assertAlmostEqual(census2[key], census3[key])

    def test_day_1(self):
        """
        Test the simulate_day function.
        """
        self.maxDiff = None

        precip = 2
        actual = simulate_day(CENSUS_1, precip)
        expected = DAY_OUTPUT_1
        self.assertEqual(actual, expected)

    def test_day_2(self):
        """
        Test the simulate_day function with lots of BMPs.
        """
        precip = 2
        actual = simulate_day(CENSUS_2, precip)
        expected = DAY_OUTPUT_2
        self.assertEqual(actual, expected)

    def test_day_with_invalid_census(self):
        """
        Test the simulate_day function with a census
        that has a modification census with a cover type
        that doesn't exist within the AoI census. This is
        invalid input. Each land cover type in a modification
        census must be represented in AoI census.
        """
        census = {
            'distribution': {
                'b:developed_med': {'cell_count': 400},
            },
            'cell_count': 400,
            'modifications': [
                {
                    'distribution': {
                        'b:developed_low': {'cell_count': 40}
                    },
                    'cell_count': 40,
                    'change': ':deciduous_forest:'
                },
            ]
        }
        precip = 3
        self.assertRaises(ValueError,
                          simulate_day, *(census, precip))

    def test_bmp_runoff(self):
        """
        Make sure that BMPs do not produce negative runoff.
        """
        census = {
            "cell_count": 1,
            "distribution": {
                "d:developed_med": {"cell_count": 1}
            },
            "modifications": [
                {
                    "change": "::green_roof",
                    "cell_count": 1,
                    "distribution": {
                        "d:developed_med": {"cell_count": 1}
                    }
                }
            ]
        }
        result = simulate_day(census, 0.984)
        self.assertTrue(result['modified']['runoff'] >= 0)

    def test_bmp_sum(self):
        """
        Make sure that runoff, evapotranspiration, and infiltration sum to
        precipitation.
        """
        census = {
            "cell_count": 1,
            "distribution": {
                "d:developed_med": {"cell_count": 1}
            },
            "modifications": [
                {
                    "change": "::green_roof",
                    "cell_count": 1,
                    "distribution": {
                        "d:developed_med": {"cell_count": 1}
                    }
                }
            ]
        }
        precip = 0.984
        result = simulate_day(census, precip)
        runoff = result['modified']['runoff']
        et = result['modified']['et']
        inf = result['modified']['inf']
        total = runoff + et + inf
        self.assertAlmostEqual(total, precip)

    def test_bmps_on_d(self):
        """
        Make sure that BMPS all work on soil type D.
        """
        census = {
            "cell_count": 2,
            "distribution": {
                "c:developed_med": {"cell_count": 1},
                "d:developed_med": {"cell_count": 1}
            },
            "modifications": [
                {
                    "change": "::porous_paving",
                    "cell_count": 1,
                    "distribution": {
                        "c:developed_med": {"cell_count": 1}
                    }
                },
                {
                    "change": "::porous_paving",
                    "cell_count": 1,
                    "distribution": {
                        "d:developed_med": {"cell_count": 1}
                    }
                }
            ]
        }

        # Porous Paving
        precip = 3.3
        result = simulate_day(census, precip)
        c_inf = result['modified']['distribution']['c:developed_med']['inf']
        d_inf = result['modified']['distribution']['d:developed_med']['inf']
        self.assertAlmostEqual(c_inf / 3, d_inf)

        # Rain Garden
        census['modifications'][0]['change'] = '::rain_garden'
        census['modifications'][1]['change'] = '::rain_garden'
        result = simulate_day(census, precip)
        c_inf = result['modified']['distribution']['c:developed_med']['inf']
        d_inf = result['modified']['distribution']['d:developed_med']['inf']
        self.assertLess(d_inf, c_inf)
        self.assertGreater(d_inf / c_inf, 0.5)

        # Infiltration Trench
        census['modifications'][0]['change'] = '::infiltration_trench'
        census['modifications'][1]['change'] = '::infiltration_trench'
        result = simulate_day(census, precip)
        c_inf = result['modified']['distribution']['c:developed_med']['inf']
        d_inf = result['modified']['distribution']['d:developed_med']['inf']
        self.assertAlmostEqual(c_inf / 3, d_inf)
# Allow running this test module directly (e.g. `python test_model.py`).
if __name__ == "__main__":
    unittest.main()
| |
import json
import datetime
import paramiko
import psycopg2
from pyinfraboxutils import get_logger, get_env, print_stackdriver
from pyinfraboxutils.db import connect_db
logger = get_logger("gerrit")
def main():
    """Stream Gerrit events over SSH and create an InfraBox build for
    every patchset-created / draft-published event.

    Blocks forever on the `gerrit stream-events` output; retries each
    event once after reconnecting if the database connection drops.
    """
    # Fail fast if any required setting is missing from the environment.
    get_env('INFRABOX_SERVICE')
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')

    gerrit_port = int(get_env('INFRABOX_GERRIT_PORT'))
    gerrit_hostname = get_env('INFRABOX_GERRIT_HOSTNAME')
    gerrit_username = get_env('INFRABOX_GERRIT_USERNAME')
    gerrit_key_filename = get_env('INFRABOX_GERRIT_KEY_FILENAME')

    conn = connect_db()
    logger.info("Connected to db")

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(username=gerrit_username,
                   hostname=gerrit_hostname,
                   port=gerrit_port,
                   key_filename=gerrit_key_filename)
    # Keep the long-lived stream connection alive across idle periods.
    client.get_transport().set_keepalive(60)
    logger.info("Connected to gerrit")

    _, stdout, _ = client.exec_command('gerrit stream-events')
    logger.info("Waiting for stream-events")

    for line in stdout:
        # Parse once per line, outside the retry loop: a JSON error is
        # not recoverable by reconnecting to the database.
        event = json.loads(line)

        # Attempt to handle the event; on a dropped DB connection,
        # reconnect and retry exactly once.
        for _ in range(2):
            try:
                if event['type'] in ("patchset-created", "draft-published"):
                    logger.info(json.dumps(event, indent=4))
                    handle_patchset_created(conn, event)
                break
            except psycopg2.OperationalError:
                try:
                    conn.close()
                except Exception:
                    # The connection is already unusable; ignore errors
                    # while discarding it.
                    pass
                conn = connect_db()
                logger.info("reconnected to db")
def handle_patchset_created_project(conn, event, project_id, project_name):
    """Create a build and its initial 'Create Jobs' job for one project
    affected by a Gerrit patchset event.

    Args:
        conn: psycopg2 connection; the caller owns the transaction
            (handle_patchset_created rolls back before and commits after).
        event: decoded Gerrit stream-events JSON object.
        project_id: id of the matching InfraBox project row.
        project_name: Gerrit project name, used to build the clone URL.
    """
    # Draft patchsets only trigger builds once published.
    if event['patchSet']['isDraft']:
        return

    c = conn.cursor()
    c.execute('SELECT id FROM repository WHERE project_id = %s', [project_id])
    result = c.fetchone()
    c.close()

    # NOTE(review): assumes exactly one repository row exists per
    # project; a missing row would raise TypeError here -- confirm.
    repository_id = result[0]
    sha = event['patchSet']['revision']

    logger.info("Repository ID: %s", repository_id)

    # Reuse an existing commit row for this revision, if any.
    c = conn.cursor()
    c.execute('SELECT * FROM "commit" WHERE project_id = %s and id = %s', [project_id, sha])
    result = c.fetchone()
    c.close()

    commit = result

    if not commit:
        # Gerrit events do not carry full author/committer details, so
        # several fields are filled with placeholders ('' / None).
        c = conn.cursor()
        c.execute('''
            INSERT INTO "commit" (
                id, message, repository_id, timestamp,
                author_name, author_email, author_username,
                committer_name, committer_email, committer_username, url, branch, project_id, tag)
            VALUES (%s, %s, %s,
                    %s, %s, %s,
                    %s, %s, %s,
                    %s, %s, %s, %s, %s)
            RETURNING *
        ''', (sha, event['change']['commitMessage'],
              repository_id, datetime.datetime.now(),
              event['change']['owner'].get('name', 'unknown'),
              '', event['change']['owner']['username'], '', '', '',
              event['change']['url'],
              event['change']['branch'], project_id, None))
        result = c.fetchone()
        c.close()
        commit = result

    # Allocate the next per-project build number.
    c = conn.cursor()
    c.execute('''
        SELECT max(build_number) + 1 AS build_no
        FROM build AS b
        WHERE b.project_id = %s''', [project_id])
    result = c.fetchone()
    c.close()

    build_no = result[0]

    # max() over zero rows yields NULL; start numbering at 1.
    if not build_no:
        build_no = 1

    c = conn.cursor()
    c.execute('''INSERT INTO build (commit_id, build_number, project_id)
                 VALUES (%s, %s, %s)
                 RETURNING id''', (sha, build_no, project_id))
    result = c.fetchone()
    c.close()
    build_id = result[0]

    # Environment variables exposed to the jobs; the names mirror the
    # Jenkins Gerrit-trigger conventions (GERRIT_*).
    env_vars = {
        "GERRIT_PATCHSET_UPLOADER_USERNAME": event['patchSet']['uploader']['username'],
        "GERRIT_PATCHSET_UPLOADER_NAME": event['patchSet']['uploader'].get('name', None),
        "GERRIT_PATCHSET_UPLOADER_EMAIL": event['patchSet']['uploader']['email'],
        "GERRIT_PATCHSET_NUMBER": event['patchSet']['number'],
        "GERRIT_PATCHSET_REF": event['patchSet']['ref'],
        "GERRIT_REFSPEC": event['patchSet']['ref'],
        "GERRIT_PATCHSET_REVISION": event['patchSet']['revision'],
        "GERRIT_CHANGE_STATUS": event['change']['status'],
        "GERRIT_CHANGE_URL": event['change']['url'],
        "GERRIT_CHANGE_COMMIT_MESSAGE": event['change']['commitMessage'],
        "GERRIT_CHANGE_NUMBER": event['change']['number'],
        "GERRIT_CHANGE_PROJECT": event['change']['project'],
        "GERRIT_PROJECT": event['change']['project'],
        "GERRIT_CHANGE_BRANCH": event['change']['branch'],
        "GERRIT_CHANGE_ID": event['change']['id'],
        "GERRIT_CHANGE_SUBJECT": event['change']['subject'],
        "GERRIT_CHANGE_OWNER_USERNAME": event['change']['owner']['username'],
        "GERRIT_CHANGE_OWNER_NAME": event['change']['owner'].get('name', None),
        "GERRIT_CHANGE_OWNER_EMAIL": event['change']['owner']['email'],
        "GERRIT_UPLOADER_USERNAME": event['uploader']['username'],
        "GERRIT_UPLOADER_NAME": event['uploader'].get('name', None),
        "GERRIT_UPLOADER_EMAIL": event['uploader']['email']
    }

    # Checkout description handed to the job runner.
    git_repo = {
        "commit": sha,
        "clone_url": "ssh://%s@%s:%s/%s" % (get_env('INFRABOX_GERRIT_USERNAME'),
                                            get_env('INFRABOX_GERRIT_HOSTNAME'),
                                            get_env('INFRABOX_GERRIT_PORT'),
                                            project_name),
        "ref": event['patchSet']['ref'],
        "event": event['change']['branch']
    }

    # Queue the job that expands the build into its job matrix.
    # NOTE(review): this final cursor is never closed -- confirm whether
    # that is intentional (the caller commits right afterwards).
    c = conn.cursor()
    c.execute('''INSERT INTO job (id, state, build_id, type, name,
                                  project_id, build_only, dockerfile,
                                  cpu, memory, repo, env_var, cluster_name)
                 VALUES (gen_random_uuid(), 'queued', %s, 'create_job_matrix', 'Create Jobs',
                         %s, false, '', 1, 1024, %s, %s, 'master')''', (build_id,
                                                                        project_id,
                                                                        json.dumps(git_repo),
                                                                        json.dumps(env_vars)))
def handle_patchset_created(conn, event):
    """Create builds for every 'gerrit' project whose name matches the
    event's project, committing them in a single transaction.
    """
    # Discard any state left over from a previous (possibly failed)
    # transaction before starting a new one.
    conn.rollback()

    # The project name may appear at the event top level or only inside
    # the change record.
    project_name = event.get('project', None) or event['change'].get('project', None)
    if not project_name:
        logger.error('Failed to get project from event')
        return

    logger.info("Project name: %s", project_name)

    # Look up every InfraBox project of type 'gerrit' with that name.
    cursor = conn.cursor()
    cursor.execute("SELECT id FROM project WHERE name = %s AND type='gerrit'",
                   [project_name])
    rows = cursor.fetchall()
    cursor.close()

    logger.info("Found projects in db: %s", json.dumps(rows))
    if not rows:
        return

    for row in rows:
        project_id = row[0]
        logger.info("Handling project with id: %s", project_id)
        handle_patchset_created_project(conn, event, project_id, project_name)

    conn.commit()
if __name__ == "__main__":
    try:
        main()
    except Exception:
        # Report the crash in Stackdriver format.  Catch Exception, not
        # a bare ``except:``, so SystemExit/KeyboardInterrupt can still
        # terminate the process normally.
        print_stackdriver()
| |
import ipaddress
from flask_restful import Resource, reqparse, abort, inputs, fields, marshal
from flask_login import login_required
from twisted.internet.defer import inlineCallbacks, returnValue
from crochet import wait_for, TimeoutError
from floranet.models.gateway import Gateway
from floranet.log import log
# Crochet timeout. If the code block does not complete within this time,
# a TimeoutError exception is raised.
from __init__ import TIMEOUT
class GatewayResource(Resource):
    """Gateway resource base class.

    Attributes:
        restapi (RestApi): Flask Restful API object
        server (NetServer): FloraNet network server object
        fields (dict): Dictionary of attributes to be returned to a REST request
        parser (RequestParser): Flask RESTful request parser
        args (dict): Parsed request argument
    """
    def __init__(self, **kwargs):
        self.restapi = kwargs['restapi']
        self.server = kwargs['server']

        # Marshalling map for gateway attributes returned to clients.
        self.fields = {
            'host': fields.String,
            'eui': fields.Integer,
            'name': fields.String,
            'enabled': fields.Boolean,
            'power': fields.Integer,
            'created': fields.DateTime(dt_format='iso8601'),
            'updated': fields.DateTime(dt_format='iso8601'),
        }

        # Accept the same attribute names as request arguments.
        self.parser = reqparse.RequestParser(bundle_errors=True)
        for arg_name, arg_type in (('host', str),
                                   ('eui', int),
                                   ('name', str),
                                   ('enabled', inputs.boolean),
                                   ('power', int)):
            self.parser.add_argument(arg_name, type=arg_type)
        self.args = self.parser.parse_args()
class RestGateway(GatewayResource):
    """RestGateway Resource class.

    Manages RESTAPI GET and PUT transactions for gateways.

    Each handler runs as a Twisted inlineCallbacks coroutine bridged to
    the (synchronous) Flask request via crochet's wait_for; exceeding
    TIMEOUT raises TimeoutError, which is logged and surfaces to the
    client as a 500.
    """
    def __init__(self, **kwargs):
        super(RestGateway, self).__init__(**kwargs)

    @login_required
    @wait_for(timeout=TIMEOUT)
    @inlineCallbacks
    def get(self, host):
        """Method to handle gateway GET requests

        Args:
            host (str): Gateway host address
        """
        try:
            g = yield Gateway.find(where=['host = ?', host], limit=1)
            # Return a 404 if not found.
            if g is None:
                abort(404, message={'error': "Gateway {} doesn't exist.".format(host)})
            returnValue(marshal(g, self.fields))
        except TimeoutError:
            log.error("REST API timeout retrieving gateway {host}",
                      host=host)

    @login_required
    @wait_for(timeout=TIMEOUT)
    @inlineCallbacks
    def put(self, host):
        """Method to handle gateway PUT requests

        Args:
            host (str): Gateway host address
        """
        try:
            gateway = yield Gateway.find(where=['host = ?', host], limit=1)
            # Return a 404 if not found.
            if gateway is None:
                abort(404, message={'error': "Gateway {} doesn't exist".format(host)})

            # Collect only the request arguments that actually change an
            # attribute, applying them to the in-memory object as we go.
            kwargs = {}
            for a,v in self.args.items():
                if v is not None and v != getattr(gateway, a):
                    kwargs[a] = v
                    setattr(gateway, a, v)
            (valid, message) = yield gateway.valid()
            if not valid:
                abort(400, message=message)
            # Update the gateway and server with the new attributes
            if kwargs:
                # NOTE(review): update() is not yielded here -- if it
                # returns a Deferred the write is not awaited; confirm.
                gateway.update(**kwargs)
                self.server.lora.updateGateway(host, gateway)
            returnValue(({}, 200))
        except TimeoutError:
            log.error("REST API timeout retrieving gateway {host}",
                      host=host)

    @login_required
    @wait_for(timeout=TIMEOUT)
    @inlineCallbacks
    def delete(self, host):
        """Method to handle gateway DELETE requests

        Args:
            host (str): Gateway host
        """
        try:
            g = yield Gateway.find(where=['host = ?', host], limit=1)
            # Return a 404 if not found.
            if g is None:
                abort(404, message={'error': "Gateway {} doesn't exist.".format(host)})
            # NOTE(review): the yielded result is unused; the variable
            # only ensures the deletion completes before notifying the
            # server below.
            deleted = yield g.delete()
            self.server.lora.deleteGateway(g)
            returnValue(({}, 200))
        except TimeoutError:
            log.error("REST API timeout retrieving gateway {host}",
                      host=host)
class RestGateways(GatewayResource):
    """ RestGateways Resource class.

    Manages REST API GET and POST transactions for reading multiple gateways,
    and creating gateways.
    """
    def __init__(self, **kwargs):
        super(RestGateways, self).__init__(**kwargs)

    @login_required
    @wait_for(timeout=TIMEOUT)
    @inlineCallbacks
    def get(self):
        """Method to get all gateways"""
        try:
            gateways = yield Gateway.all()
            if gateways is None:
                returnValue({})
            # Marshal each gateway, keyed by its position in the result.
            data = {}
            for i,g in enumerate(gateways):
                data[i] = marshal(g, self.fields)
            returnValue(data)
        except TimeoutError:
            # Exception returns 500 to client
            log.error("REST API timeout retrieving all gateways")

    @login_required
    @wait_for(timeout=TIMEOUT)
    @inlineCallbacks
    def post(self):
        """Method to create a gateway"""
        host = self.args['host']
        name = self.args['name']
        eui = self.args['eui']
        enabled = self.args['enabled']
        power = self.args['power']
        message = {}

        # Check for required args
        required = {'host', 'name', 'eui', 'enabled', 'power'}
        for r in required:
            if self.args[r] is None:
                message[r] = "Missing the {} parameter.".format(r)
        if message:
            abort(400, message=message)

        # Ensure we have a valid address
        try:
            ipaddress.ip_address(host)
        except (ipaddress.AddressValueError, ValueError):
            message = {'error': "Invalid IP address {} ".format(host)}
            abort(400, message=message)

        # Ensure we have a valid EUI
        if not isinstance(eui, (int, long)):
            message = {'error': "Invalid gateway EUI {} ".format(eui)}
            abort(400, message=message)

        # Check this gateway does not currently exist
        exists = yield Gateway.exists(where=['host = ?', host])
        if exists:
            message = {'error': "Gateway address {} ".format(host) + \
                       "currently exists."}
            abort(400, message=message)

        # Check the EUI does not currently exist
        exists = yield Gateway.exists(where=['eui = ?', eui])
        if exists:
            message = {'error': "Gateway EUI {} ".format(eui) + \
                       "currently exists."}
            abort(400, message=message)

        # Create and validate.  valid() is yielded for consistency with
        # RestGateway.put(): if it returns a Deferred, the previous
        # non-yielding call would have tried to unpack the Deferred and
        # skipped real validation; yielding a plain tuple is a no-op
        # inside inlineCallbacks, so this is safe either way.
        gateway = Gateway(host=host, eui=eui, name=name, enabled=enabled, power=power)
        (valid, message) = yield gateway.valid()
        if not valid:
            abort(400, message=message)

        try:
            g = yield gateway.save()
            if g is None:
                abort(500, message={'error': "Error saving the gateway."})
            # Add the new gateway to the server.
            self.server.lora.addGateway(g)
            location = self.restapi.api.prefix + '/gateway/' + str(host)
            returnValue(({}, 201, {'Location': location}))
        except TimeoutError:
            # Exception returns 500 to client
            log.error("REST API timeout for gateway POST request")
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Transforms a float-trained graph into an equivalent quantized version.
An example of command-line usage is:
bazel build tensorflow/tools/quantization:quantize_graph \
&& bazel-bin/tensorflow/tools/quantization/quantize_graph \
--input=tensorflow_inception_graph.pb
--output_node_names="softmax2" --print_nodes --output=/tmp/quantized_graph.pb \
--mode=eightbit --logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# Command-line interface for the quantization tool.
flags = tf.app.flags
FLAGS = flags.FLAGS
# Debugging / inspection flags.
flags.DEFINE_boolean("print_nodes", False, """Lists all nodes in the model.""")
# Input / output graph locations.
flags.DEFINE_string("input", "", """TensorFlow 'GraphDef' file to load.""")
flags.DEFINE_string("output_node_names", "",
                    """Output node names, comma separated.""")
flags.DEFINE_string("output", "", """File to save the output graph to.""")
# Core quantization parameters.
flags.DEFINE_integer("bitdepth", 8,
                     """How many bits to quantize the graph to.""")
flags.DEFINE_string("mode", "round",
                    """What transformation to apply (round, quantize,"""
                    """ eightbit, weights, or weights_rounded).""")
flags.DEFINE_string("test_input_dims", "1,224,224,3",
                    """The size of the input tensor to use when testing a"""
                    """ graph loaded from a file.""")
flags.DEFINE_boolean("strip_redundant_quantization", True,
                     """Removes redundant dequantize/quantize pairs.""")
# Flags describing pre-quantized placeholder inputs (eightbit mode only).
flags.DEFINE_boolean("quantized_input", False,
                     "If true, assume Placeholders are quantized with values "
                     "covering [--quantized_input_min,--quantized_input_max]. "
                     "Only supported when --mode=eightbit")
flags.DEFINE_float("quantized_input_min", 0,
                   "The minimum of the actual input range when "
                   "--quantized_input")
flags.DEFINE_float("quantized_input_max", 1,
                   "The maximum of the actual input range when "
                   "--quantized_input")
# Fallback range used when a layer's min/max cannot be inferred; coarse and
# experimental (see help text).
flags.DEFINE_float(
    "quantized_fallback_min", None,
    "The fallback 'min' value to use for layers which lack min-max "
    "information. Note: this should be considered a coarse tool just good "
    "enough for experimentation purposes, since graphs quantized in this way "
    "would be very inaccurate.")
flags.DEFINE_float(
    "quantized_fallback_max", None,
    "The fallback 'max' value to use for layers which lack min-max "
    "information. Note: this should be considered a coarse tool just good "
    "enough for experimentation purposes, since graphs quantized in this way "
    "would be very inaccurate.")
def print_input_nodes(current_node, nodes_map, indent, already_visited):
  """Recursively prints a node and its transitive inputs, indented by depth.

  Args:
    current_node: NodeDef-like object with `op`, `name` and `input` fields.
    nodes_map: Mapping from node name to node, used to resolve inputs.
    indent: Current recursion depth; controls the leading spaces.
    already_visited: Dict used as a visited set; mutated in place.
  """
  print("%s%s:%s" % (" " * indent, current_node.op, current_node.name))
  already_visited[current_node.name] = True
  for input_name in current_node.input:
    # Check at iteration time: earlier recursive calls may have visited this
    # input already.
    if input_name not in already_visited:
      print_input_nodes(nodes_map[input_name], nodes_map, indent + 1,
                        already_visited)
def create_node(op, name, inputs):
  """Builds a NodeDef with the given op type, name, and input list."""
  node = tf.NodeDef()
  node.op = op
  node.name = name
  node.input.extend(inputs)
  return node
def create_constant_node(name, value, dtype, shape=None):
  """Builds a Const NodeDef holding `value` with the given dtype and shape."""
  const_node = create_node("Const", name, [])
  set_attr_dtype(const_node, "dtype", dtype)
  set_attr_tensor(const_node, "value", value, dtype, shape)
  return const_node
def copy_attr(node, key, attr_value):
  """Copies a pre-built AttrValue proto onto node.attr[key]."""
  try:
    destination = node.attr[key]
    destination.CopyFrom(attr_value)
  except KeyError:
    pass
def set_attr_dtype(node, key, value):
  """Stores a DType under node.attr[key] as its datatype enum value."""
  try:
    dtype_attr = tf.AttrValue(type=value.as_datatype_enum)
    node.attr[key].CopyFrom(dtype_attr)
  except KeyError:
    pass
def set_attr_shape(node, key, value):
  """Stores a shape under node.attr[key] as a TensorShapeProto."""
  try:
    shape_proto = tensor_shape.as_shape(value).as_proto()
    node.attr[key].CopyFrom(tf.AttrValue(shape=shape_proto))
  except KeyError:
    pass
def set_attr_tensor(node, key, value, dtype, shape=None):
  """Stores `value` under node.attr[key] as a TensorProto."""
  try:
    tensor_proto = tensor_util.make_tensor_proto(value, dtype=dtype,
                                                 shape=shape)
    node.attr[key].CopyFrom(tf.AttrValue(tensor=tensor_proto))
  except KeyError:
    pass
def set_attr_string(node, key, value):
  """Stores a bytes value under node.attr[key]."""
  try:
    string_attr = tf.AttrValue(s=value)
    node.attr[key].CopyFrom(string_attr)
  except KeyError:
    pass
def set_attr_int_list(node, key, value):
  """Stores a sequence of ints under node.attr[key] as a ListValue."""
  int_list = tf.AttrValue.ListValue(i=value)
  try:
    node.attr[key].CopyFrom(tf.AttrValue(list=int_list))
  except KeyError:
    pass
def set_attr_bool(node, key, value):
  """Stores a boolean under node.attr[key]."""
  try:
    bool_attr = tf.AttrValue(b=value)
    node.attr[key].CopyFrom(bool_attr)
  except KeyError:
    pass
def set_attr_int(node, key, value):
  """Stores an integer under node.attr[key]."""
  try:
    int_attr = tf.AttrValue(i=value)
    node.attr[key].CopyFrom(int_attr)
  except KeyError:
    pass
def set_attr_float(node, key, value):
  """Stores a float under node.attr[key]."""
  try:
    float_attr = tf.AttrValue(f=value)
    node.attr[key].CopyFrom(float_attr)
  except KeyError:
    pass
def node_name_from_input(node_name):
  """Strips off ports and other decorations to get the underlying node name."""
  # Drop a single leading control-dependency marker, if present.
  name = node_name[1:] if node_name.startswith("^") else node_name
  # Drop a trailing ":<port>" suffix, if present.
  match = re.search(r"(.*):\d+$", name)
  return match.group(1) if match else name
def ensure_tensor_name_has_port(node_name):
  """Makes sure that a tensor name has :0 if no explicit port exists."""
  if re.search(r"(.*):\d+$", node_name):
    return node_name
  return node_name + ":0"
def unique_node_name_from_input(node_name):
  """Replaces invalid characters in input names to get a unique node name."""
  # ":" marks an output port and "^" a control dependency; neither is legal
  # inside a node name, so spell them out instead.
  without_ports = node_name.replace(":", "__port__")
  return without_ports.replace("^", "__hat__")
def quantize_array(arr, num_buckets):
  """Quantizes a numpy array by snapping each value to a bucket center.

  Each scalar in `arr` maps to the center of one of `num_buckets` equal-width
  buckets spanning [arr.min(), arr.max()]. For instance,
  quantize_array([0, 0.3, 0.6, 1], 2) => [0.25, 0.25, 0.75, 0.75]

  Args:
    arr: The numpy array to quantize.
    num_buckets: The number of buckets to map values onto.

  Returns:
    The quantized numpy array.

  Raises:
    ValueError: when num_buckets < 1.
  """
  if num_buckets < 1:
    raise ValueError("num_buckets must be >= 1")
  lo = arr.min()
  hi = arr.max()
  # A constant array has zero range; nothing to quantize.
  if hi == lo:
    return arr
  width = (hi - lo) / num_buckets
  # Bucket index per scalar. The maximum value would land one past the last
  # bucket, so clamp indices into [0, num_buckets - 1].
  indices = np.minimum(np.floor((arr - lo) / width), num_buckets - 1)
  # Map each scalar to the center of its bucket.
  return lo + width * (indices + 0.5)
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats.

  The float Const's values are snapped to bucket centers via quantize_array
  and re-emitted as a float32 Const with the same name and shape.
  """
  tensor_proto = input_node.attr["value"].tensor
  values = tensor_util.MakeNdarray(tensor_proto)
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in chosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  rounded_values = quantize_array(values, num_buckets)
  dims = tensor_util.TensorShapeProtoToList(tensor_proto.tensor_shape)
  return [create_constant_node(input_node.name, rounded_values,
                               tf.float32, shape=dims)]
def quantize_weight_eightbit(input_node, quantization_mode):
  """Returns replacement nodes for input_node using the Dequantize op.

  The float Const is replaced by four nodes: a quint8 Const with the
  quantized values, two float Consts holding the min/max of the range, and a
  Dequantize node that keeps the original node's name so downstream
  consumers are unaffected.

  Args:
    input_node: Float Const NodeDef to quantize.
    quantization_mode: bytes, the quantize/dequantize mode (e.g. b"MIN_FIRST").

  Returns:
    List of the four replacement NodeDefs.
  """
  base_name = input_node.name + "_"
  quint8_const_name = base_name + "quint8_const"
  min_name = base_name + "min"
  max_name = base_name + "max"
  float_tensor = tensor_util.MakeNdarray(
      input_node.attr["value"].tensor)
  min_value = np.min(float_tensor.flatten())
  max_value = np.max(float_tensor.flatten())
  # min_value == max_value is a tricky case. It can occur for general
  # tensors, and of course for scalars. The quantized ops cannot deal
  # with this case, so we set max_value to something else.
  # It's a tricky question what is the numerically best solution to
  # deal with this degeneracy.
  # TODO(petewarden): Better use a tolerance than a hard comparison?
  if min_value == max_value:
    if abs(min_value) < 0.000001:
      max_value = min_value + 1.0
    elif min_value > 0:
      max_value = 2 * min_value
    else:
      max_value = min_value / 2.0
  # Use the session as a context manager so it is closed and its resources
  # released when we are done; previously the session leaked.
  with tf.Session() as sess:
    with sess.as_default():
      quantize_op = tf.contrib.quantization.python.quantize_v2(
          float_tensor,
          min_value,
          max_value,
          tf.quint8,
          mode=quantization_mode)
      quint8_tensor = quantize_op[0].eval()
  shape = tensor_util.TensorShapeProtoToList(input_node.attr[
      "value"].tensor.tensor_shape)
  quint8_const_node = create_constant_node(quint8_const_name,
                                           quint8_tensor,
                                           tf.quint8,
                                           shape=shape)
  min_node = create_constant_node(min_name, min_value, tf.float32)
  max_node = create_constant_node(max_name, max_value, tf.float32)
  dequantize_node = create_node("Dequantize", input_node.name,
                                [quint8_const_name, min_name, max_name])
  set_attr_dtype(dequantize_node, "T", tf.quint8)
  set_attr_string(dequantize_node, "mode", quantization_mode)
  return [quint8_const_node, min_node, max_node, dequantize_node]
# State threaded through eightbitize_nodes_recursively():
#   already_visited: dict of node names already processed.
#   output_node_stack: stack of (consumer node, input index, quantize-input?)
#     tuples describing the path from the requested outputs to the current
#     node.
#   merged_with_fake_quant: node names whose output has been folded into a
#     consuming FakeQuant* node.
EightbitizeRecursionState = collections.namedtuple(
    "EightbitizeRecursionState", ["already_visited", "output_node_stack",
                                  "merged_with_fake_quant"])
class GraphRewriter(object):
"""Takes a float graph, and rewrites it in quantized form."""
def __init__(self, input_graph, mode, quantized_input_range,
fallback_quantization_range=None):
"""Sets up the class to rewrite a float graph.
Args:
input_graph: A float graph to transform.
mode: A string controlling how quantization is performed -
round, quantize, eightbit, or weights.
quantized_input_range: if set, assume the input is
quantized and represents the range
[quantized_input_range[0], quantized_input_range[1]]
fallback_quantization_range: if set, then for nodes where the quantization
range can't be inferred from the graph, use the range
[fallback_quantization_range[0], fallback_quantization_range[1]) instead
of using a RequantizationRange node in the graph.
Raises:
ValueError: Two nodes with the same name were found in the graph.
"""
self.input_graph = input_graph
self.nodes_map = self.create_nodes_map(input_graph)
self.output_graph = None
self.mode = mode
self.final_node_renames = {}
if quantized_input_range:
self.input_range = (quantized_input_range[0], quantized_input_range[1])
if self.input_range[0] >= self.input_range[1]:
raise ValueError("Invalid quantized_input_range: [%s,%s]" %
self.input_range)
if self.mode != "eightbit":
raise ValueError(
"quantized_input_range can only be specified in eightbit mode")
else:
self.input_range = None
if fallback_quantization_range:
self.fallback_quantization_range = [fallback_quantization_range[0],
fallback_quantization_range[1]]
if (self.fallback_quantization_range[0] >=
self.fallback_quantization_range[1]):
raise ValueError("Invalid fallback_quantization_range: [%s,%s]" %
self.fallback_quantization_range)
if self.mode != "eightbit":
raise ValueError(
"fallback_quantization_range can only be "
"specified in eightbit mode")
else:
self.fallback_quantization_range = None
# Data that is valid only during the recursive call to rewrite the graph.
self.state = None
def create_nodes_map(self, graph):
"""Builds a mapping of node names to their defs from the graph."""
nodes_map = {}
for node in graph.node:
if node.name not in nodes_map.keys():
nodes_map[node.name] = node
else:
raise ValueError("Duplicate node names detected.")
return nodes_map
  def rewrite(self, output_node_names):
    """Triggers rewriting of the float graph.

    Dispatches on self.mode ("round", "quantize", "eightbit", "weights",
    "weights_rounded") and builds self.output_graph from self.input_graph.

    Args:
      output_node_names: A list of names of the nodes that produce the final
        results.

    Returns:
      A quantized version of the float graph.
    """
    self.output_graph = tf.GraphDef()
    output_nodes = [self.nodes_map[output_node_name]
                    for output_node_name in output_node_names]
    if self.mode == "round":
      self.already_visited = {}
      for output_node in output_nodes:
        self.round_nodes_recursively(output_node)
    elif self.mode == "quantize":
      self.already_visited = {}
      self.already_quantized = {}
      for output_node in output_nodes:
        self.quantize_nodes_recursively(output_node)
    elif self.mode == "eightbit":
      # Strip training-only nodes first, then re-resolve the output nodes
      # against the rebuilt nodes_map.
      self.set_input_graph(graph_util.remove_training_nodes(self.input_graph))
      output_nodes = [self.nodes_map[output_node_name]
                      for output_node_name in output_node_names]
      # Recursion state only lives for the duration of the walk.
      self.state = EightbitizeRecursionState(already_visited={},
                                            output_node_stack=[],
                                            merged_with_fake_quant={})
      for output_node in output_nodes:
        self.eightbitize_nodes_recursively(output_node)
      self.state = None
      if self.input_range:
        # Constants consumed by the Dequantize added for quantized
        # Placeholder inputs (see eightbitize_placeholder_node).
        self.add_output_graph_node(create_constant_node(
            "quantized_input_min_value", self.input_range[0], tf.float32, []))
        self.add_output_graph_node(create_constant_node(
            "quantized_input_max_value", self.input_range[1], tf.float32, []))
      if self.fallback_quantization_range:
        # Constants consumed by add_quantize_down_nodes when no per-node
        # range information is available.
        self.add_output_graph_node(create_constant_node(
            "fallback_quantization_min_value",
            self.fallback_quantization_range[0], tf.float32, []))
        self.add_output_graph_node(create_constant_node(
            "fallback_quantization_max_value",
            self.fallback_quantization_range[1], tf.float32, []))
      if FLAGS.strip_redundant_quantization:
        self.output_graph = self.remove_redundant_quantization(
            self.output_graph)
        self.remove_dead_nodes(output_node_names)
      self.apply_final_node_renames()
    elif self.mode == "weights":
      self.output_graph = self.quantize_weights(self.input_graph,
                                                b"MIN_COMBINED")
      self.remove_dead_nodes(output_node_names)
    elif self.mode == "weights_rounded":
      self.output_graph = self.quantize_weights(self.input_graph, self.mode)
      self.remove_dead_nodes(output_node_names)
    else:
      print("Bad mode - " + self.mode + ".")
    return self.output_graph
def round_nodes_recursively(self, current_node):
"""The entry point for simple rounding quantization."""
if self.already_visited[current_node.name]:
return
self.already_visited[current_node.name] = True
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.round_nodes_recursively(input_node)
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
new_node.name = current_node.name + "_original"
self.add_output_graph_node(new_node)
levels = 1 << FLAGS.bitdepth
constant_name = current_node.name + "_round_depth"
constant_tensor = tf.constant(levels, dtype=tf.int32, name=constant_name)
constant_node = constant_tensor.op.node_def
self.add_output_graph_node(constant_node)
quantize_node = tf.NodeDef()
quantize_node.op = "RoundToSteps"
quantize_node.name = current_node.name
quantize_node.input.extend([current_node.name + "_original"])
quantize_node.input.extend([constant_node.name])
self.add_output_graph_node(quantize_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_nodes_recursively(self, current_node):
"""The entry point for quantizing nodes to eight bit and back."""
if self.already_visited[current_node.name]:
return
self.already_visited[current_node.name] = True
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.quantize_nodes_recursively(input_node)
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
for input_name in current_node.input:
input_name = node_name_from_input(input_name)
input_node = self.nodes_map[input_name]
self.quantize_node(input_node)
self.quantize_node(current_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
  def quantize_node(self, input_node):
    """Handles quantizing a single node.

    Emits a subgraph that computes the min/max of the flattened input at
    runtime, quantizes to quint8 and immediately dequantizes back to float.
    The Dequantize node takes over the original node's name so downstream
    consumers are unaffected; the original node is preserved under
    "<name>_original".
    """
    input_name = input_node.name
    # Each node only needs the quantize/dequantize wrapper once, even if it
    # feeds several quantizable consumers.
    if input_name in self.already_quantized:
      return
    self.already_quantized[input_name] = True
    original_input_name = input_name + "_original"
    reshape_name = input_name + "_reshape"
    reshape_dims_name = input_name + "_reshape_dims"
    max_name = input_name + "_max"
    min_name = input_name + "_min"
    dims_name = input_name + "_dims"
    quantize_name = input_name + "_quantize"
    dequantize_name = input_name
    original_input_node = tf.NodeDef()
    original_input_node.CopyFrom(input_node)
    original_input_node.name = original_input_name
    self.add_output_graph_node(original_input_node)
    # Flatten to 1-D so Min/Max reduce over every element.
    reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
                                             [1])
    self.add_output_graph_node(reshape_dims_node)
    reshape_node = create_node("Reshape", reshape_name, [original_input_name,
                                                         reshape_dims_name])
    set_attr_dtype(reshape_node, "T", tf.float32)
    self.add_output_graph_node(reshape_node)
    dims_node = create_constant_node(dims_name, 0, tf.int32, [1])
    self.add_output_graph_node(dims_node)
    max_node = create_node("Max", max_name, [reshape_name, dims_name])
    set_attr_dtype(max_node, "T", tf.float32)
    set_attr_bool(max_node, "keep_dims", False)
    self.add_output_graph_node(max_node)
    min_node = create_node("Min", min_name, [reshape_name, dims_name])
    set_attr_dtype(min_node, "T", tf.float32)
    set_attr_bool(min_node, "keep_dims", False)
    self.add_output_graph_node(min_node)
    quantize_node = create_node("Quantize", quantize_name, [original_input_name,
                                                            min_name, max_name])
    set_attr_dtype(quantize_node, "T", tf.quint8)
    set_attr_string(quantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(quantize_node)
    dequantize_node = create_node("Dequantize", dequantize_name,
                                  [quantize_name, min_name, max_name])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)
def should_merge_with_fake_quant_node(self):
"""Should the current node merge with self.state.output_node_stack[-1]?"""
if not self.state.output_node_stack: return False
top = self.state.output_node_stack[-1]
return top[1] == 0 and top[0].op in ["FakeQuantWithMinMaxVars"]
def should_quantize_const(self, node):
if not self.state.output_node_stack: return False
top = self.state.output_node_stack[-1]
if not top[2]: return False
assert tf.as_dtype(node.attr["dtype"].type) == tf.float32, (
"Quantizing constant %s" % node.name)
return True
def eightbitize_nodes_recursively(self, current_node):
"""The entry point for transforming a graph into full eight bit."""
if current_node.name in self.state.already_visited:
if (self.should_merge_with_fake_quant_node() or
current_node.name in self.state.merged_with_fake_quant):
raise ValueError("Unsupported graph structure: output of node %s "
"is processed by a FakeQuant* node and should have "
"no other outputs.", current_node.name)
return
self.state.already_visited[current_node.name] = True
for i, input_node_name in enumerate(current_node.input):
quantize_input = False
if current_node.op in ("MatMul", "Conv2D", "BiasAdd", "MaxPool",
"AvgPool", "Relu", "Relu6",
"BatchNormWithGlobalNormalization"):
quantize_input = True
elif current_node.op == "Concat" and i > 0:
quantize_input = True
elif current_node.op == "Reshape" and i == 0:
quantize_input = True
self.state.output_node_stack.append((current_node, i, quantize_input))
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.eightbitize_nodes_recursively(input_node)
self.state.output_node_stack.pop()
if current_node.op == "MatMul":
self.eightbitize_mat_mul_node(current_node)
elif current_node.op == "Conv2D":
self.eightbitize_conv_node(current_node)
elif current_node.op == "BiasAdd":
self.eightbitize_bias_add_node(current_node)
elif current_node.op == "MaxPool" or current_node.op == "AvgPool":
self.eightbitize_single_input_tensor_node(current_node,
self.add_pool_function)
elif current_node.op == "Relu" or current_node.op == "Relu6":
self.eightbitize_single_input_tensor_node(current_node,
self.add_relu_function)
elif current_node.op == "Concat":
self.eightbitize_concat_node(current_node)
elif current_node.op == "BatchNormWithGlobalNormalization":
self.eightbitize_batch_norm_node(current_node)
elif current_node.op == "Reshape":
self.eightbitize_reshape_node(current_node)
elif (self.input_range and
current_node.op in ("Placeholder", "PlaceholderV2")):
self.eightbitize_placeholder_node(current_node)
elif current_node.op == "FakeQuantWithMinMaxVars":
# It will have been merged into the underlying node.
pass
elif current_node.op == "Const":
if self.should_quantize_const(current_node):
for n in quantize_weight_eightbit(current_node, b"MIN_FIRST"):
self.add_output_graph_node(n)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
###################################################################
# Note: if more cases are added here, you may need to update the op
# name lists in the loop over children at the start of the function.
###################################################################
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
if (self.should_merge_with_fake_quant_node() and
current_node.name not in self.state.merged_with_fake_quant):
raise ValueError(
"FakeQuant* node %s failed to merge with node %s of type %s" % (
self.state.output_node_stack[-1][0], current_node.name,
current_node.op))
def add_eightbit_prologue_nodes(self, original_node):
"""Adds input conversion nodes to handle quantizing the underlying node."""
namespace_prefix = original_node.name + "_eightbit"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
input_names = []
min_max_names = []
for original_input_name in original_node.input:
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name,
reduction_dims_name))
input_names.append(quantize_input_name)
min_max_names.append(min_input_name)
min_max_names.append(max_input_name)
all_input_names = []
all_input_names.extend(input_names)
all_input_names.extend(min_max_names)
return all_input_names
def add_common_quantization_nodes(self, namespace_prefix):
"""Builds constant nodes needed for quantization of inputs."""
reshape_dims_name = namespace_prefix + "_reshape_dims"
reduction_dims_name = namespace_prefix + "_reduction_dims"
reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
[1])
self.add_output_graph_node(reshape_dims_node)
reduction_dims_node = create_constant_node(reduction_dims_name, 0, tf.int32,
[1])
self.add_output_graph_node(reduction_dims_node)
return reshape_dims_name, reduction_dims_name
  def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
                                reshape_dims_name, reduction_dims_name):
    """Takes one float input to an op, and converts it to quantized form.

    Emits Reshape (flatten) -> Min/Max -> QuantizeV2 nodes for the input.

    Args:
      namespace_prefix: Name prefix for the generated nodes.
      original_input_name: Name of the float input tensor.
      reshape_dims_name: Name of the shared flatten-dims constant.
      reduction_dims_name: Name of the shared reduction-dims constant.

    Returns:
      (quantized tensor name, min output name, max output name); the min/max
      come from output ports :1 and :2 of the QuantizeV2 node.
    """
    unique_input_name = unique_node_name_from_input(original_input_name)
    reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
    min_input_name = namespace_prefix + "_min_" + unique_input_name
    max_input_name = namespace_prefix + "_max_" + unique_input_name
    quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
    reshape_input_node = create_node("Reshape", reshape_input_name,
                                     [original_input_name, reshape_dims_name])
    set_attr_dtype(reshape_input_node, "T", tf.float32)
    self.add_output_graph_node(reshape_input_node)
    min_input_node = create_node("Min", min_input_name, [reshape_input_name,
                                                         reduction_dims_name])
    set_attr_dtype(min_input_node, "T", tf.float32)
    set_attr_bool(min_input_node, "keep_dims", False)
    self.add_output_graph_node(min_input_node)
    max_input_node = create_node("Max", max_input_name, [reshape_input_name,
                                                         reduction_dims_name])
    set_attr_dtype(max_input_node, "T", tf.float32)
    set_attr_bool(max_input_node, "keep_dims", False)
    self.add_output_graph_node(max_input_node)
    quantize_input_node = create_node("QuantizeV2", quantize_input_name,
                                      [original_input_name, min_input_name,
                                       max_input_name])
    set_attr_dtype(quantize_input_node, "T", tf.quint8)
    set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(quantize_input_node)
    # QuantizeV2 emits the actual min/max used on output ports 1 and 2.
    min_output_name = quantize_input_name + ":1"
    max_output_name = quantize_input_name + ":2"
    return quantize_input_name, min_output_name, max_output_name
  def add_quantize_down_nodes(self, original_node, quantized_output_name):
    """Adds a Requantize step converting a qint32 result down to quint8.

    The min/max inputs for Requantize come, in priority order, from: a
    merged FakeQuantWithMinMaxVars consumer, the global fallback range
    constants, or a freshly added RequantizationRange node.

    Args:
      original_node: The float node being replaced.
      quantized_output_name: Name of the qint32-producing quantized op.

    Returns:
      Name of the Requantize node (quint8 output).
    """
    quantized_outputs = [
        quantized_output_name, quantized_output_name + ":1",
        quantized_output_name + ":2"
    ]
    min_max_inputs = None
    if self.should_merge_with_fake_quant_node():
      # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
      # Requantize.
      fake_quant_node = self.state.output_node_stack[-1][0]
      min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
      assert original_node.name not in self.state.merged_with_fake_quant
      self.state.merged_with_fake_quant[original_node.name] = True
    elif self.fallback_quantization_range:
      # These constants are emitted once by rewrite() when a fallback range
      # was supplied.
      min_max_inputs = ["fallback_quantization_min_value:0",
                        "fallback_quantization_max_value:0"]
    else:
      # Add a RequantizationRange node for finding the min and max values.
      requant_range_node = create_node(
          "RequantizationRange", original_node.name + "_eightbit_requant_range",
          quantized_outputs)
      set_attr_dtype(requant_range_node, "Tinput", tf.qint32)
      self.add_output_graph_node(requant_range_node)
      min_max_inputs = [requant_range_node.name + ":0",
                        requant_range_node.name + ":1"]
    requantize_node = create_node(
        "Requantize", original_node.name + "_eightbit_requantize",
        quantized_outputs + min_max_inputs)
    set_attr_dtype(requantize_node, "Tinput", tf.qint32)
    set_attr_dtype(requantize_node, "out_type", tf.quint8)
    self.add_output_graph_node(requantize_node)
    return requantize_node.name
  def add_dequantize_result_node(self, quantized_output_name,
                                 original_node_name, min_tensor_index=1):
    """Adds the Dequantize node converting a quantized result back to float.

    The Dequantize normally takes over `original_node_name` so downstream
    consumers resolve; when merging with a FakeQuant* consumer it instead
    takes that node's name and min/max inputs.

    Args:
      quantized_output_name: Name of the quantized-op output to dequantize.
      original_node_name: Name of the float node being replaced.
      min_tensor_index: Output port of the min tensor (max is the next port).
    """
    min_max_inputs = [
        "%s:%s" % (quantized_output_name, min_tensor_index),
        "%s:%s" % (quantized_output_name, (min_tensor_index + 1))]
    dequantize_name = original_node_name
    if self.should_merge_with_fake_quant_node():
      fake_quant_node = self.state.output_node_stack[-1][0]
      # Only merge once per node; add_quantize_down_nodes may have already
      # recorded the merge.
      if original_node_name not in self.state.merged_with_fake_quant:
        min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
        self.state.merged_with_fake_quant[original_node_name] = True
      dequantize_name = fake_quant_node.name
    dequantize_node = create_node(
        "Dequantize", dequantize_name,
        [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)
def eightbitize_mat_mul_node(self, original_node):
"""Replaces a MatMul node with the eight bit equivalent sub-graph."""
quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_mat_mul_node = create_node(
"QuantizedMatMul", quantized_mat_mul_name,
all_input_names)
set_attr_dtype(quantized_mat_mul_node, "T1", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "T2", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "Toutput", tf.qint32)
copy_attr(quantized_mat_mul_node, "transpose_a",
original_node.attr["transpose_a"])
copy_attr(quantized_mat_mul_node, "transpose_b",
original_node.attr["transpose_b"])
self.add_output_graph_node(quantized_mat_mul_node)
quantize_down_name = self.add_quantize_down_nodes(original_node,
quantized_mat_mul_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_conv_node(self, original_node):
"""Replaces a Conv2D node with the eight bit equivalent sub-graph."""
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_conv_name = original_node.name + "_eightbit_quantized_conv"
quantized_conv_node = create_node("QuantizedConv2D", quantized_conv_name,
all_input_names)
copy_attr(quantized_conv_node, "strides", original_node.attr["strides"])
copy_attr(quantized_conv_node, "padding", original_node.attr["padding"])
set_attr_dtype(quantized_conv_node, "Tinput", tf.quint8)
set_attr_dtype(quantized_conv_node, "Tfilter", tf.quint8)
set_attr_dtype(quantized_conv_node, "out_type", tf.qint32)
self.add_output_graph_node(quantized_conv_node)
quantize_down_name = self.add_quantize_down_nodes(original_node,
quantized_conv_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_bias_add_node(self, original_node):
"""Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
quantized_bias_add_name = (original_node.name +
"_eightbit_quantized_bias_add")
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_bias_add_node = create_node(
"QuantizedBiasAdd", quantized_bias_add_name,
all_input_names)
set_attr_dtype(quantized_bias_add_node, "T1", tf.quint8)
set_attr_dtype(quantized_bias_add_node, "T2", tf.quint8)
set_attr_dtype(quantized_bias_add_node, "out_type", tf.qint32)
self.add_output_graph_node(quantized_bias_add_node)
quantize_down_name = self.add_quantize_down_nodes(original_node,
quantized_bias_add_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_single_input_tensor_node(self, original_node,
add_op_function):
"""Replaces a single-tensor node with the eight bit equivalent sub-graph.
Converts a node like this:
Shape(f) Input(f)
| |
+--------v v
Operation
|
v
(f)
Into a quantized equivalent:
Input(f) ReshapeDims
+------v v-------------+
| Reshape
| |
| | ReductionDims
| +-----+ |
| | +---c---------+
| v v v v-------+
| Min Max
| +----+ |
v v v--------+
Quantize
|
v
QuantizedOperation
| | |
v v v
Dequantize
|
v
(f)
Args:
original_node: Float node to be converted.
add_op_function: Function to create the actual node.
Returns:
Subgraph representing the quantized version of the original node.
"""
quantized_op_name = original_node.name + "_eightbit_quantized"
quantized_op_type = "Quantized" + original_node.op
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_op_node = create_node(
quantized_op_type, quantized_op_name, all_input_names)
add_op_function(original_node, quantized_op_node)
self.add_output_graph_node(quantized_op_node)
self.add_dequantize_result_node(quantized_op_name, original_node.name)
def add_pool_function(self, original_node, quantized_op_node):
set_attr_dtype(quantized_op_node, "T", tf.quint8)
copy_attr(quantized_op_node, "ksize", original_node.attr["ksize"])
copy_attr(quantized_op_node, "strides", original_node.attr["strides"])
copy_attr(quantized_op_node, "padding", original_node.attr["padding"])
def add_relu_function(self, unused_arg_node, quantized_op_node):
set_attr_dtype(quantized_op_node, "Tinput", tf.quint8)
  def eightbitize_concat_node(self, original_node):
    """Replaces a Concat node with the eight bit equivalent sub-graph.

    Converts a node like this:

       Shape(f)   Input0(f)   Input1(f)
        |             |          |
        +--------v v v----------+
                  Concat
                    |
                    v
                   (f)

    Into a quantized equivalent:

       Shape(f)  Input0(f)       ReshapeDims                   Input1(f)
        |        +------v v--------------+------------------v v------+
        |        |    Reshape                             Reshape    |
        |        |      |                                    |       |
        |        |      |           ReductionDims            |       |
        |        |      +------+         |         +--------+       |
        |        |      | +---c---------+-----------c-----+  |       |
        |        |      +v v   v v-------+---------v v   v v+        |
        |        |       Min   Max                  Min   Max        |
        |        |  +----+      |                    |  +-----+      |
        |        v  v  v--------+                    +----------v  v v
        |       Quantize                                       Quantize
        |        +------------------+               +----------------------+
        +-------------------------------+           |        |
                                        v           v        v
                                          QuantizedConcat
                                           |       |      |
                                           v       v      v
                                            Dequantize
                                                |
                                                v
                                               (f)

    Args:
      original_node: Float node to be converted.

    Returns:
      Subgraph representing the quantized version of the original node.
    """
    namespace_prefix = original_node.name + "_eightbit"
    quantized_concat_name = namespace_prefix + "_quantized_concat"
    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
        namespace_prefix)
    # Input 0 is the concat dimension and passes through unquantized.
    shape_input_name = original_node.input[0]
    original_inputs = original_node.input[1:]
    input_names = []
    min_names = []
    max_names = []
    for original_input_name in original_inputs:
      quantize_input_name, min_input_name, max_input_name = (
          self.eightbitize_input_to_node(namespace_prefix, original_input_name,
                                         reshape_dims_name,
                                         reduction_dims_name))
      input_names.append(quantize_input_name)
      min_names.append(min_input_name)
      max_names.append(max_input_name)
    # QuantizedConcat expects: dim, all values, all mins, all maxes.
    all_input_names = [shape_input_name]
    all_input_names.extend(input_names)
    all_input_names.extend(min_names)
    all_input_names.extend(max_names)
    quantized_concat_node = create_node(
        "QuantizedConcat", quantized_concat_name, all_input_names)
    set_attr_int(quantized_concat_node, "N", len(original_inputs))
    set_attr_dtype(quantized_concat_node, "T", tf.quint8)
    self.add_output_graph_node(quantized_concat_node)
    self.add_dequantize_result_node(quantized_concat_name, original_node.name)
def eightbitize_placeholder_node(self, current_node):
    """Replaces a placeholder node with a quint8 placeholder node+dequantize.

    Args:
      current_node: The float Placeholder node to convert.
    """
    name = current_node.name
    # Convert the placeholder into a quantized type.
    output_node = tf.NodeDef()
    output_node.CopyFrom(current_node)
    set_attr_dtype(output_node, "dtype", tf.quint8)
    output_node.name += "_original_input"
    self.add_output_graph_node(output_node)
    # Add a dequantize to convert back to float.  The min/max inputs are
    # well-known node names that supply the quantization range for the feed.
    dequantize_node = create_node(
        "Dequantize", name,
        [output_node.name, "quantized_input_min_value",
         "quantized_input_max_value"])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)
    # For the descent over the graph to work, the dequantize node must be named
    # current_node.name. However, for the feeding of the graph to work, the
    # placeholder must have the name current_node.name; so record a final set
    # of renames to apply after all processing has been done.
    self.final_node_renames[output_node.name] = name
    self.final_node_renames[dequantize_node.name] = name + "_dequantize"
def eightbitize_reshape_node(self, original_node):
    """Replaces a Reshape node with the eight bit equivalent sub-graph.

    The data input is quantized (producing value/min/max tensors), fed into
    a QuantizedReshape together with the untouched target-shape input, and
    the result is dequantized back to float under the original node's name.

    Args:
      original_node: Float node to be converted.

    Returns:
      Subgraph representing the quantized version of the original node.
    """
    prefix = original_node.name + "_eightbit"
    reshape_dims, reduction_dims = self.add_common_quantization_nodes(prefix)
    # Only input[0] (the data) is quantized; input[1] (the target shape)
    # passes through unchanged.
    quantized_input, input_min, input_max = (
        self.eightbitize_input_to_node(prefix, original_node.input[0],
                                       reshape_dims, reduction_dims))
    quantized_name = prefix + "_quantized_reshape"
    quantized_reshape = create_node(
        "QuantizedReshape", quantized_name,
        [quantized_input, original_node.input[1], input_min, input_max])
    set_attr_dtype(quantized_reshape, "T", tf.quint8)
    self.add_output_graph_node(quantized_reshape)
    self.add_dequantize_result_node(quantized_name, original_node.name)
def eightbitize_batch_norm_node(self, original_node):
    """Replaces a batch-norm node with the eight bit equivalent sub-graph.

    (The previous docstring said "MatMul"; that was a copy/paste error --
    this pass converts BatchNormWithGlobalNormalization nodes.)

    Each of the node's five float inputs -- input, mean, variance, beta and
    gamma, in that order -- is quantized to a (value, min, max) triple, and
    the triples feed a QuantizedBatchNormWithGlobalNormalization node whose
    32-bit result is requantized down to eight bits and finally dequantized
    back to float under the original node's name.

    Args:
      original_node: Float batch-norm node to be converted.
    """
    namespace_prefix = original_node.name + "_eightbit"
    quantized_batch_norm_name = namespace_prefix + "_quantized_batch_norm"
    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
        namespace_prefix)
    # Quantize the five inputs in order, collecting (quantized, min, max)
    # name triples.  This replaces five hand-unrolled copies of the same
    # eightbitize_input_to_node call.
    all_input_names = []
    for original_input_name in original_node.input[:5]:
        quantize_name, min_name, max_name = (
            self.eightbitize_input_to_node(namespace_prefix, original_input_name,
                                           reshape_dims_name,
                                           reduction_dims_name))
        all_input_names.extend([quantize_name, min_name, max_name])
    quantized_batch_norm_node = create_node(
        "QuantizedBatchNormWithGlobalNormalization", quantized_batch_norm_name,
        all_input_names)
    set_attr_dtype(quantized_batch_norm_node, "Tinput", tf.quint8)
    set_attr_dtype(quantized_batch_norm_node, "out_type", tf.qint32)
    copy_attr(quantized_batch_norm_node, "scale_after_normalization",
              original_node.attr["scale_after_normalization"])
    copy_attr(quantized_batch_norm_node, "variance_epsilon",
              original_node.attr["variance_epsilon"])
    self.add_output_graph_node(quantized_batch_norm_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_batch_norm_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name)
def add_output_graph_node(self, output_node):
    """Appends a single node to the graph currently being assembled."""
    self.output_graph.node.append(output_node)
def remove_redundant_quantization(self, old_graph):
    """Removes unneeded pairs of quantize/dequantize ops from the graph.

    This is a bit of a tricky function, because it's attempting to spot the
    pattern of dequantizing from eight-bit up to float, and then immediately
    quantizing back down to eight bits again, that's introduced by previous
    passes that do 'key-hole' conversions of individual nodes but have to
    convert back to float to match the previous output interface, since they
    don't know that the next op can handle quantized tensors.

    It works by:
     - Looking for Quantize nodes.
     - Checking to see if their first input is a Dequantize node.
     - Seeing if their min/max inputs come from Min/Max nodes.
     - Making sure those Min/Max nodes are being fed from the same Dequantize.
     - Or that the Min is indirectly being fed from the same Dequantize as Max.
     - Making sure the Dequantize is going through a Reshape (which we add
       during the previous pass when we create the quantize sub-graph).
     - Looking for the dims Const op for the Min/Max dims.

    If all of these conditions are met, then it's a sub-graph pattern that
    we know how to optimize out (and is likely the common one we've
    introduced).  We then rewire the graph to skip it entirely, and then rely
    on the dead node removal pass to get rid of any nodes that are no longer
    needed.

    Args:
      old_graph: The model we'll be stripping redundant nodes from.

    Returns:
      A graph with the unnecessary nodes removed.

    Raises:
      ValueError: Two nodes with the same name were found in the graph.
    """
    old_nodes_map = self.create_nodes_map(old_graph)
    self.output_graph = tf.GraphDef()
    inputs_to_rename = {}
    # We go through all the nodes, looking for any that match the patterns we
    # know how to optimize away.
    for node in old_graph.node:
        # We always start with a Quantize node, and examine its inputs to see
        # if they are in a form that can be removed.
        if node.op not in ["Quantize", "QuantizeV2"]:
            continue
        dequantize_node_name = node_name_from_input(node.input[0])
        if dequantize_node_name not in old_nodes_map:
            raise ValueError("Input node name '" + dequantize_node_name +
                             "' not found in node '" + node.name + "'")
        dequantize_node = old_nodes_map[dequantize_node_name]
        # Do we have a Dequantize feeding in, with the same type as the
        # Quantize?
        if dequantize_node.op != "Dequantize":
            continue
        if node.attr["T"] != dequantize_node.attr["T"]:
            continue
        # Now look at the other inputs, and ensure they're Min/Max nodes.
        min_node_name = node_name_from_input(node.input[1])
        max_node_name = node_name_from_input(node.input[2])
        min_node = old_nodes_map[min_node_name]
        max_node = old_nodes_map[max_node_name]
        is_min_right_type = (min_node.op in ["Min", "Dequantize"])
        is_max_right_type = (max_node.op in ["Max", "Dequantize"])
        if not is_min_right_type or not is_max_right_type:
            print("Didn't find expected types on inputs : %s, %s." % (
                min_node.op, max_node.op))
            continue
        min_node_input_name = node_name_from_input(min_node.input[0])
        max_node_input_name = node_name_from_input(max_node.input[0])
        # There are two different patterns for Min nodes we can recognize, one
        # where the input comes directly from the same one as the Max, and
        # another where we run it through another Min first, so check for both.
        is_same_input = False
        if min_node_input_name == max_node_input_name:
            is_same_input = True
        else:
            # Indirect pattern: the Min is fed through a Concat whose second
            # input is another Min that shares the Max's source.
            first_min_node_input = old_nodes_map[min_node_input_name]
            if first_min_node_input.op == "Concat":
                second_min_node_name = node_name_from_input(
                    first_min_node_input.input[1])
                second_min_node = old_nodes_map[second_min_node_name]
                if second_min_node.op == "Min":
                    second_min_node_input_name = node_name_from_input(
                        second_min_node.input[0])
                    is_same_input = (second_min_node_input_name == max_node_input_name)
        if not is_same_input:
            print("Different min/max inputs: " + min_node_input_name)
            continue
        # We recognize this pattern, so mark the graph edges to be rewired to
        # route around it entirely, since we know it's a no-op.
        dequantize_source_name = node_name_from_input(dequantize_node.input[0])
        node_tensor_name = ensure_tensor_name_has_port(node.name)
        min_tensor_name = node.name + ":1"
        max_tensor_name = node.name + ":2"
        inputs_to_rename[node_tensor_name] = dequantize_source_name
        inputs_to_rename[min_tensor_name] = dequantize_node.input[1]
        inputs_to_rename[max_tensor_name] = dequantize_node.input[2]
    # Finally we apply all the rewiring we've marked to the graph.
    for node in old_graph.node:
        for index, input_full_name in enumerate(node.input):
            input_name = ensure_tensor_name_has_port(input_full_name)
            if input_name in inputs_to_rename:
                node.input[index] = inputs_to_rename[input_name]
        self.add_output_graph_node(node)
    return self.output_graph
def apply_final_node_renames(self):
    """Applies node renames in self.final_node_renames to self.output_graph."""
    renames = self.final_node_renames
    source_graph = self.output_graph
    self.output_graph = tf.GraphDef()
    for node in source_graph.node:
        # Rename the node itself, then fix up any inputs that refer to a
        # renamed node, preserving the ":port" suffix if present.
        node.name = renames.get(node.name, node.name)
        for index, original_input in enumerate(node.input):
            base_name = node_name_from_input(original_input)
            if base_name not in renames:
                continue
            with_port = ensure_tensor_name_has_port(original_input)
            node.input[index] = renames[base_name] + with_port[len(base_name):]
        self.add_output_graph_node(node)
    return self.output_graph
def remove_dead_nodes(self, output_names):
    """Removes nodes that are no longer needed for inference from the graph."""
    pruned_graph = graph_util.extract_sub_graph(self.output_graph,
                                                output_names)
    self.output_graph = pruned_graph
def quantize_weights(self, input_graph, quantization_mode):
    """Quantize float Const ops.

    There are two modes of operation, both of which replace float Const ops
    with quantized values:

    1. If quantization_mode is "weights_rounded", float Const ops are
       replaced by quantized float Const ops -- the same op, but with each
       float mapped to the center of one of 1<<FLAGS.bitdepth buckets.  The
       raw model size is unchanged, but compressors such as zip (as used for
       compressing apks) or bzip2 achieve a very good ratio.
    2. For the other modes ("MIN_COMBINED" or "MIN_FIRST"), each float Const
       is replaced by a tuple of four ops that dequantize at runtime:
       * an eight-bit Const (bucket indices, same shape as the original)
       * two float Consts (the min and max of the original values)
       * a Dequantize op converting the eight-bit consts back to float.

    The quantization mode matters because we see accuracy problems when
    quantizing weights for different situations depending on the algorithm
    used; the underlying cause is not yet understood.

    Args:
      input_graph: A GraphDef of the model containing float Const ops.
      quantization_mode: How to quantize and dequantize the values.

    Returns:
      A GraphDef of the converted graph.

    Raises:
      ValueError: If quantization_mode is unsupported.
    """
    output_graph = tf.GraphDef()
    for input_node in input_graph.node:
        is_float_const = (
            input_node.op == "Const" and
            tf.as_dtype(input_node.attr["dtype"].type) == tf.float32)
        if not is_float_const:
            # Anything that isn't a float Const passes through unchanged.
            untouched = tf.NodeDef()
            untouched.CopyFrom(input_node)
            output_graph.node.extend([untouched])
            continue
        if quantization_mode == "weights_rounded":
            output_graph.node.extend(quantize_weight_rounded(input_node))
        elif quantization_mode in (b"MIN_COMBINED", b"MIN_FIRST"):
            output_graph.node.extend(
                quantize_weight_eightbit(input_node, quantization_mode))
        else:
            raise ValueError("Unsupported quantization mode %s." %
                             quantization_mode)
    return output_graph
def set_input_graph(self, new_input_graph):
    """Replaces the graph being rewritten and refreshes the name->node map."""
    self.input_graph = new_input_graph
    self.nodes_map = self.create_nodes_map(new_input_graph)
def main(unused_args):
    """Loads FLAGS.input, rewrites it per FLAGS.mode, writes FLAGS.output.

    Returns:
      0 on success, -1 if the input file is missing or the mode is unknown.
    """
    if not tf.gfile.Exists(FLAGS.input):
        print("Input graph file '" + FLAGS.input + "' does not exist!")
        return -1
    known_modes = ["round", "quantize", "eightbit", "weights", "test",
                   "weights_rounded"]
    # Bug fix: the previous check used a substring test
    # (any(FLAGS.mode in s for s in known_modes)), which accepted partial
    # mode names such as "eight" or even "".  Require an exact match.
    if FLAGS.mode not in known_modes:
        print("mode is '" + FLAGS.mode + "', not in " + ", ".join(known_modes) +
              ".")
        return -1
    tf_graph = tf.GraphDef()
    with tf.gfile.Open(FLAGS.input, "rb") as f:
        data = f.read()
        tf_graph.ParseFromString(data)
    # Import into a throwaway Graph to validate the GraphDef before rewriting.
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(tf_graph, input_map={}, name="")
    quantized_input_range = None
    if FLAGS.quantized_input:
        quantized_input_range = [FLAGS.quantized_input_min,
                                 FLAGS.quantized_input_max]
    fallback_quantization_range = None
    if (FLAGS.quantized_fallback_min is not None or
            FLAGS.quantized_fallback_max is not None):
        # Both ends of the fallback range must be supplied together.
        assert FLAGS.quantized_fallback_min is not None
        assert FLAGS.quantized_fallback_max is not None
        fallback_quantization_range = [FLAGS.quantized_fallback_min,
                                       FLAGS.quantized_fallback_max]
    rewriter = GraphRewriter(tf_graph, FLAGS.mode, quantized_input_range,
                             fallback_quantization_range)
    output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))
    # Bug fix: the output file handle was previously never closed; use a
    # context manager so the write is flushed deterministically.
    with tf.gfile.FastGFile(FLAGS.output, "wb") as f:
        f.write(output_graph.SerializeToString())
    return 0
if __name__ == "__main__":
    # Parse command-line flags and dispatch to main() via the TF app runner.
    tf.app.run()
| |
from __future__ import print_function
from distutils.version import LooseVersion
import warnings
import numpy as np
import pandas as pd
def _flatten_multi_geoms(geoms, colors=None):
    """
    Explode Multi* geometries into their single-part components.

    Returns sequences parallel to ``geoms``/``colors`` in which every Multi
    geometry is replaced by its components, with the matching color repeated
    once per component so the 1:1 geometry/color pairing is maintained.

    Passing `color` is optional, and when no `color` is passed a list of
    None values is returned as `component_colors`.  "Colors" are treated
    opaquely and so can actually contain any values.

    Returns
    -------
    components : list of geometry
    component_colors : list of whatever type `colors` contains
    """
    if colors is None:
        colors = [None] * len(geoms)

    # Fast path: nothing to explode, hand back the inputs untouched.
    if not geoms.geom_type.str.startswith('Multi').any():
        return geoms, colors

    # precondition, so zip can't short-circuit
    assert len(geoms) == len(colors)

    components = []
    component_colors = []
    for geom, color in zip(geoms, colors):
        if geom.type.startswith('Multi'):
            # One entry (and one repeated color) per sub-geometry.
            for part in geom:
                components.append(part)
                component_colors.append(color)
        else:
            components.append(geom)
            component_colors.append(color)
    return components, component_colors
def plot_polygon_collection(ax, geoms, values=None, color=None,
                            cmap=None, vmin=None, vmax=None, **kwargs):
    """
    Plots a collection of Polygon and MultiPolygon geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)
    values : a sequence of `N` values, optional
        Values will be mapped to colors using vmin/vmax/cmap. They should
        have 1:1 correspondence with the geometries (not their components).
        Otherwise follows `color` / `facecolor` kwargs.
    edgecolor : single color or sequence of `N` colors
        Color for the edge of the polygons
    facecolor : single color or sequence of `N` colors
        Color to fill the polygons. Cannot be used together with `values`.
    color : single color or sequence of `N` colors
        Sets both `edgecolor` and `facecolor`

    **kwargs
        Additional keyword arguments passed to the collection

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    try:
        from descartes.patch import PolygonPatch
    except ImportError:
        raise ImportError("The descartes package is required"
                          " for plotting polygons in geopandas.")
    from matplotlib.collections import PatchCollection

    geoms, values = _flatten_multi_geoms(geoms, values)
    if None in values:
        values = None

    # PatchCollection does not accept some kwargs.
    kwargs.pop('markersize', None)

    # color=None overwrites specified facecolor/edgecolor with default color
    if color is not None:
        kwargs['color'] = color

    patches = [PolygonPatch(poly) for poly in geoms]
    collection = PatchCollection(patches, **kwargs)

    if values is not None:
        collection.set_array(np.asarray(values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)

    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_linestring_collection(ax, geoms, values=None, color=None,
                               cmap=None, vmin=None, vmax=None, **kwargs):
    """
    Plots a collection of LineString and MultiLineString geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be
        mixed)
    values : a sequence of `N` values, optional
        Values will be mapped to colors using vmin/vmax/cmap. They should
        have 1:1 correspondence with the geometries (not their components).
    color : single color or sequence of `N` colors
        Cannot be used together with `values`.

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    from matplotlib.collections import LineCollection

    geoms, values = _flatten_multi_geoms(geoms, values)
    if None in values:
        values = None

    # LineCollection does not accept some kwargs.
    kwargs.pop('markersize', None)

    # color=None gives black instead of default color cycle
    if color is not None:
        kwargs['color'] = color

    # Keep only the x/y columns of each linestring's coordinate array.
    segments = [np.array(linestring)[:, :2] for linestring in geoms]
    collection = LineCollection(segments, **kwargs)

    if values is not None:
        collection.set_array(np.asarray(values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)

    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_point_collection(ax, geoms, values=None, color=None,
                          cmap=None, vmin=None, vmax=None,
                          marker='o', markersize=None, **kwargs):
    """
    Plots a collection of Point and MultiPoint geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : sequence of `N` Points or MultiPoints
    values : a sequence of `N` values, optional
        Values mapped to colors using vmin, vmax, and cmap.
        Cannot be specified together with `color`.
    markersize : scalar or array-like, optional
        Size of the markers. Note that under the hood ``scatter`` is
        used, so the specified value will be proportional to the
        area of the marker (size in points^2).

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    if values is not None and color is not None:
        raise ValueError("Can only specify one of 'values' and 'color' kwargs")

    geoms, values = _flatten_multi_geoms(geoms, values)
    if None in values:
        values = None

    xs = [p.x for p in geoms]
    ys = [p.y for p in geoms]

    # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None
    if values is not None:
        kwargs['c'] = values
    if markersize is not None:
        kwargs['s'] = markersize

    return ax.scatter(xs, ys, color=color, vmin=vmin, vmax=vmax, cmap=cmap,
                      marker=marker, **kwargs)
def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
    """
    Plot a GeoSeries.

    Generate a plot of a GeoSeries geometry with matplotlib.

    Parameters
    ----------
    s : Series
        The GeoSeries to be plotted. Currently Polygon,
        MultiPolygon, LineString, MultiLineString and Point
        geometries can be plotted.
    cmap : str (default None)
        The name of a colormap recognized by matplotlib. Any
        colormap will work, but categorical colormaps are
        generally recommended. Examples of useful discrete
        colormaps include:
        tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2
    color : str (default None)
        If specified, all objects will be colored uniformly.
    ax : matplotlib.pyplot.Artist (default None)
        axes on which to draw the plot
    figsize : pair of floats (default None)
        Size of the resulting matplotlib.figure.Figure. If the argument
        ax is given explicitly, figsize is ignored.
    **style_kwds : dict
        Color options to be passed on to the actual plot function, such
        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
        ``alpha``.

    Returns
    -------
    ax : matplotlib axes instance
    """
    # Backwards-compatible aliases for renamed keyword arguments.
    if 'colormap' in style_kwds:
        warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                      "(for consistency with matplotlib)", FutureWarning)
        cmap = style_kwds.pop('colormap')
    if 'axes' in style_kwds:
        warnings.warn("'axes' is deprecated, please use 'ax' instead "
                      "(for consistency with pandas)", FutureWarning)
        ax = style_kwds.pop('axes')

    import matplotlib.pyplot as plt
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    ax.set_aspect('equal')

    if s.empty:
        warnings.warn("The GeoSeries you are attempting to plot is "
                      "empty. Nothing has been displayed.", UserWarning)
        return ax

    # if cmap is specified, create range of colors based on cmap
    values = None
    if cmap is not None:
        values = np.arange(len(s))
        if hasattr(cmap, 'N'):
            # Colormap object with a finite palette: cycle through its entries.
            values = values % cmap.N
        style_kwds['vmin'] = style_kwds.get('vmin', values.min())
        style_kwds['vmax'] = style_kwds.get('vmax', values.max())

    # Split the series by geometry family so each family can be drawn as a
    # single matplotlib collection.
    geom_types = s.geometry.type
    poly_idx = np.asarray((geom_types == 'Polygon')
                          | (geom_types == 'MultiPolygon'))
    line_idx = np.asarray((geom_types == 'LineString')
                          | (geom_types == 'MultiLineString'))
    point_idx = np.asarray((geom_types == 'Point')
                           | (geom_types == 'MultiPoint'))

    # plot all Polygons and all MultiPolygon components in the same collection
    polys = s.geometry[poly_idx]
    if not polys.empty:
        # color overrides both face and edgecolor. As we want people to be
        # able to use edgecolor as well, pass color to facecolor
        facecolor = style_kwds.pop('facecolor', None)
        if color is not None:
            facecolor = color
        values_ = values[poly_idx] if cmap else None
        plot_polygon_collection(ax, polys, values_, facecolor=facecolor,
                                cmap=cmap, **style_kwds)

    # plot all LineStrings and MultiLineString components in same collection
    lines = s.geometry[line_idx]
    if not lines.empty:
        values_ = values[line_idx] if cmap else None
        plot_linestring_collection(ax, lines, values_, color=color, cmap=cmap,
                                   **style_kwds)

    # plot all Points in the same collection
    points = s.geometry[point_idx]
    if not points.empty:
        values_ = values[point_idx] if cmap else None
        plot_point_collection(ax, points, values_, color=color, cmap=cmap,
                              **style_kwds)

    plt.draw()
    return ax
def plot_dataframe(df, column=None, cmap=None, color=None, ax=None,
                   categorical=False, legend=False, scheme=None, k=5,
                   vmin=None, vmax=None, markersize=None, figsize=None,
                   legend_kwds=None, **style_kwds):
    """
    Plot a GeoDataFrame.

    Generate a plot of a GeoDataFrame with matplotlib.  If a
    column is specified, the plot coloring will be based on values
    in that column.

    Parameters
    ----------
    df : GeoDataFrame
        The GeoDataFrame to be plotted. Currently Polygon,
        MultiPolygon, LineString, MultiLineString and Point
        geometries can be plotted.
    column : str, np.array, pd.Series (default None)
        The name of the dataframe column, np.array, or pd.Series to be
        plotted.  If np.array or pd.Series are used then it must have same
        length as dataframe. Values are used to color the plot. Ignored if
        `color` is also set.
    cmap : str (default None)
        The name of a colormap recognized by matplotlib.
    color : str (default None)
        If specified, all objects will be colored uniformly.
    ax : matplotlib.pyplot.Artist (default None)
        axes on which to draw the plot
    categorical : bool (default False)
        If False, cmap will reflect numerical values of the
        column being plotted. For non-numerical columns, this
        will be set to True.
    legend : bool (default False)
        Plot a legend. Ignored if no `column` is given, or if `color` is
        given.
    scheme : str (default None)
        Name of a choropleth classification scheme (requires PySAL).
        A pysal.esda.mapclassify.Map_Classifier object will be used
        under the hood. Supported schemes: 'Equal_interval', 'Quantiles',
        'Fisher_Jenks'
    k : int (default 5)
        Number of classes (ignored if scheme is None)
    vmin : None or float (default None)
        Minimum value of cmap. If None, the minimum data value
        in the column to be plotted is used.
    vmax : None or float (default None)
        Maximum value of cmap. If None, the maximum data value
        in the column to be plotted is used.
    markersize : str or float or sequence (default None)
        Only applies to point geometries within a frame.
        If a str, will use the values in the column of the frame specified
        by markersize to set the size of markers. Otherwise can be a value
        to apply to all points, or a sequence of the same length as the
        number of points.
    figsize : tuple of integers (default None)
        Size of the resulting matplotlib.figure.Figure. If the argument
        axes is given explicitly, figsize is ignored.
    legend_kwds : dict (default None)
        Keyword arguments to pass to ax.legend()
    **style_kwds : dict
        Color options to be passed on to the actual plot function, such
        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
        ``alpha``.

    Returns
    -------
    ax : matplotlib axes instance
    """
    # Backwards-compatible aliases for renamed keyword arguments.
    if 'colormap' in style_kwds:
        warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                      "(for consistency with matplotlib)", FutureWarning)
        cmap = style_kwds.pop('colormap')
    if 'axes' in style_kwds:
        warnings.warn("'axes' is deprecated, please use 'ax' instead "
                      "(for consistency with pandas)", FutureWarning)
        ax = style_kwds.pop('axes')
    if column is not None and color is not None:
        warnings.warn("Only specify one of 'column' or 'color'. Using "
                      "'color'.", UserWarning)
        column = None

    import matplotlib
    import matplotlib.pyplot as plt

    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    ax.set_aspect('equal')

    if df.empty:
        warnings.warn("The GeoDataFrame you are attempting to plot is "
                      "empty. Nothing has been displayed.", UserWarning)
        return ax

    if isinstance(markersize, str):
        # A string markersize names a column of the frame to read sizes from.
        markersize = df[markersize].values

    if column is None:
        return plot_series(df.geometry, cmap=cmap, color=color, ax=ax,
                           figsize=figsize, markersize=markersize,
                           **style_kwds)

    # To accept pd.Series and np.arrays as column
    if isinstance(column, (np.ndarray, pd.Series)):
        if column.shape[0] != df.shape[0]:
            raise ValueError("The dataframe and given column have different "
                             "number of rows.")
        else:
            values = np.asarray(column)
    else:
        values = np.asarray(df[column])

    if values.dtype is np.dtype('O'):
        categorical = True

    # Define `values` as a Series
    if categorical:
        if cmap is None:
            if LooseVersion(matplotlib.__version__) >= '2.0.1':
                cmap = 'tab10'
            elif LooseVersion(matplotlib.__version__) >= '2.0.0':
                # Erroneous name.
                cmap = 'Vega10'
            else:
                cmap = 'Set1'
        categories = list(set(values))
        categories.sort()
        valuemap = dict((key, v) for (v, key) in enumerate(categories))
        # BUG FIX: this comprehension previously used `k` as its loop
        # variable, shadowing the `k` parameter (number of classes).  Under
        # Python 2's leaking list-comprehension scope that corrupted the
        # later __pysal_choro(values, scheme, k=k) call.  Renamed to `key`.
        values = np.array([valuemap[key] for key in values])

    if scheme is not None:
        binning = __pysal_choro(values, scheme, k=k)
        # set categorical to True for creating the legend
        categorical = True
        binedges = [values.min()] + binning.bins.tolist()
        categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
                      for i in range(len(binedges)-1)]
        values = np.array(binning.yb)

    mn = values.min() if vmin is None else vmin
    mx = values.max() if vmax is None else vmax

    # Split the frame by geometry family so each family can be drawn as a
    # single matplotlib collection.
    geom_types = df.geometry.type
    poly_idx = np.asarray((geom_types == 'Polygon')
                          | (geom_types == 'MultiPolygon'))
    line_idx = np.asarray((geom_types == 'LineString')
                          | (geom_types == 'MultiLineString'))
    point_idx = np.asarray((geom_types == 'Point')
                           | (geom_types == 'MultiPoint'))

    # plot all Polygons and all MultiPolygon components in the same collection
    polys = df.geometry[poly_idx]
    if not polys.empty:
        plot_polygon_collection(ax, polys, values[poly_idx],
                                vmin=mn, vmax=mx, cmap=cmap, **style_kwds)

    # plot all LineStrings and MultiLineString components in same collection
    lines = df.geometry[line_idx]
    if not lines.empty:
        plot_linestring_collection(ax, lines, values[line_idx],
                                   vmin=mn, vmax=mx, cmap=cmap, **style_kwds)

    # plot all Points in the same collection
    points = df.geometry[point_idx]
    if not points.empty:
        if isinstance(markersize, np.ndarray):
            markersize = markersize[point_idx]
        plot_point_collection(ax, points, values[point_idx], vmin=mn, vmax=mx,
                              markersize=markersize, cmap=cmap,
                              **style_kwds)

    if legend and not color:
        from matplotlib.lines import Line2D
        from matplotlib.colors import Normalize
        from matplotlib import cm

        norm = Normalize(vmin=mn, vmax=mx)
        n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
        if categorical:
            # One proxy marker per category, colored through the mappable.
            patches = []
            for value, cat in enumerate(categories):
                patches.append(
                    Line2D([0], [0], linestyle="none", marker="o",
                           alpha=style_kwds.get('alpha', 1), markersize=10,
                           markerfacecolor=n_cmap.to_rgba(value)))
            if legend_kwds is None:
                legend_kwds = {}
            legend_kwds.setdefault('numpoints', 1)
            legend_kwds.setdefault('loc', 'best')
            ax.legend(patches, categories, **legend_kwds)
        else:
            n_cmap.set_array([])
            ax.get_figure().colorbar(n_cmap, ax=ax)

    plt.draw()
    return ax
def __pysal_choro(values, scheme, k=5):
    """
    Wrapper for choropleth schemes from PySAL for use with plot_dataframe

    Parameters
    ----------
    values
        Series to be plotted
    scheme : str
        One of pysal.esda.mapclassify classification schemes
        Options are 'Equal_interval', 'Quantiles', 'Fisher_Jenks'
    k : int
        number of classes (2 <= k <=9)

    Returns
    -------
    binning
        Binning object that holds the Series with values replaced with
        class identifier and the bins.

    Raises
    ------
    ImportError
        If PySAL is not installed.
    ValueError
        If `scheme` is not one of the supported classification schemes.
    """
    try:
        from pysal.esda.mapclassify import (
            Quantiles, Equal_Interval, Fisher_Jenks)
        # Map lower-cased scheme names to their PySAL classifier classes.
        schemes = {}
        schemes['equal_interval'] = Equal_Interval
        schemes['quantiles'] = Quantiles
        schemes['fisher_jenks'] = Fisher_Jenks
        scheme = scheme.lower()
        if scheme not in schemes:
            raise ValueError("Invalid scheme. Scheme must be in the"
                             " set: %r" % schemes.keys())
        return schemes[scheme](values, k)
    except ImportError:
        raise ImportError("PySAL is required to use the 'scheme' keyword")
| |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The VCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from test_framework.test_framework import VCoinTestFramework
from test_framework.util import *
# Construct 2 trivial P2SH's and the ScriptSigs that spend them.
# This lets us create many, many transactions without spending time signing:
# the redeem scripts are constant, so no keys are involved.
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP"
# Associated ScriptSig's (hex) that satisfy P2SH_1 and P2SH_2 respectively:
# 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP"
SCRIPT_SIG = ["0451025175", "0451025275"]
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
    '''
    Create and send a transaction with a random fee.

    The transaction pays to a trivial P2SH script, and assumes that its
    inputs are of the same form.  The function takes a list of confirmed
    outputs and unconfirmed outputs and attempts to use the confirmed list
    first for its inputs.  It adds the newly created outputs to the
    unconfirmed list.

    Returns (raw transaction, fee)

    Raises RuntimeError if the two lists together cannot cover amount + fee.
    '''
    # It's best to exponentially distribute our random fees
    # because the buckets are exponentially spaced.
    # Exponentially distributed from 1-128 * fee_increment
    rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
    # Total fee ranges from min_fee to min_fee + 127*fee_increment
    fee = min_fee - fee_increment + satoshi_round(rand_fee)
    inputs = []
    total_in = Decimal("0.00000000")
    # Gather inputs from the confirmed list first...
    while total_in <= (amount + fee) and len(conflist) > 0:
        t = conflist.pop(0)
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
    if total_in <= amount + fee:
        # ...then fall back to unconfirmed outputs if still short.
        while total_in <= (amount + fee) and len(unconflist) > 0:
            t = unconflist.pop(0)
            total_in += t["amount"]
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
        if total_in <= amount + fee:
            raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
    outputs = {}
    # vout 0 is change back to ourselves; vout 1 is the payment.
    outputs[P2SH_1] = total_in - amount - fee
    outputs[P2SH_2] = amount
    rawtx = from_node.createrawtransaction(inputs, outputs)
    # createrawtransaction constructs a transaction that is ready to be signed.
    # These transactions don't need to be signed, but we still have to insert
    # the ScriptSig that will satisfy the ScriptPubKey.
    # NOTE(review): the fixed offsets below assume each serialized input
    # occupies 82 hex characters with an empty scriptSig placeholder at
    # offset 82 -- confirm against the raw transaction format before changing.
    completetx = rawtx[0:10]
    inputnum = 0
    for inp in inputs:
        completetx += rawtx[10+82*inputnum:82+82*inputnum]
        completetx += SCRIPT_SIG[inp["vout"]]
        completetx += rawtx[84+82*inputnum:92+82*inputnum]
        inputnum += 1
    completetx += rawtx[10+82*inputnum:]
    txid = from_node.sendrawtransaction(completetx, True)
    # Both new outputs (change and payment) become unconfirmed spendables.
    unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
    unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
    return (completetx, fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
    '''
    We need to generate a lot of very small inputs so we can generate a ton of transactions
    and they will have low priority.
    This function takes an input from txins, and creates and sends a transaction
    which splits the value into 2 outputs which are appended to txouts.
    '''
    source = txins.pop()
    spend = [{"txid": source["txid"], "vout": source["vout"]}]
    # Split the value roughly in half, leaving 1000 satoshis as the fee
    first_half = satoshi_round(source["amount"] / 2)
    second_half = source["amount"] - first_half - Decimal("0.00001000")
    destinations = {P2SH_1: first_half, P2SH_2: second_half}
    rawtx = from_node.createrawtransaction(spend, destinations)
    # The very first split spends a wallet output and really must be signed;
    # later splits spend our P2SH puzzle outputs, where splicing in the known
    # ScriptSig is enough to satisfy the ScriptPubKey.
    if initial_split:
        signed_tx = from_node.signrawtransaction(rawtx)["hex"]
    else:
        signed_tx = rawtx[:82] + SCRIPT_SIG[source["vout"]] + rawtx[84:]
    new_txid = from_node.sendrawtransaction(signed_tx, True)
    txouts.append({"txid": new_txid, "vout": 0, "amount": first_half})
    txouts.append({"txid": new_txid, "vout": 1, "amount": second_half})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
    '''
    This function calls estimatefee and verifies that the estimates
    meet certain invariants.
    '''
    all_estimates = [node.estimatefee(i) for i in range(1, 26)]
    if print_estimates:
        print([str(all_estimates[e - 1]) for e in [1, 2, 3, 6, 15, 25]])
    epsilon = 1.0e-6  # account for rounding error
    prev = max(fees_seen)
    # Only valid (non-negative) estimates participate in the range and
    # monotonicity checks.
    for est in (x for x in all_estimates if x >= 0):
        # Estimates should be within the bounds of what transactions fees actually were:
        if float(est) + epsilon < min(fees_seen) or float(est) - epsilon > max(fees_seen):
            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                 %(float(est), min(fees_seen), max(fees_seen)))
        # Estimates should be monotonically decreasing
        if float(est) - epsilon > prev:
            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
                                 %(float(est), float(prev)))
        prev = est
    seen_valid = False
    bad_count = 0
    for idx, est in enumerate(all_estimates):  # estimate is for idx+1 confirmations
        if est >= 0:
            seen_valid = True
            # estimatesmartfee should return the same result
            assert_equal(node.estimatesmartfee(idx + 1)["feerate"], est)
        else:
            bad_count += 1
            # estimatesmartfee should still be valid
            approx_estimate = node.estimatesmartfee(idx + 1)["feerate"]
            answer_found = node.estimatesmartfee(idx + 1)["blocks"]
            assert(approx_estimate > 0)
            assert(answer_found > idx + 1)
            # Once we're at a high enough confirmation count that we can give an estimate
            # We should have estimates for all higher confirmation counts
            if seen_valid:
                raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
    # Check on the expected number of different confirmation counts
    # that we might not have valid estimates for
    if bad_count > max_invalid:
        raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
    return all_estimates
class EstimateFeeTest(VCoinTestFramework):
    """Functional test of the fee-estimation machinery.

    Three nodes mine with different block-size parameters while
    randomized-fee transactions are generated; node1's estimatefee /
    estimatesmartfee answers are then checked (via ``check_estimates``)
    against the fee rates actually paid.
    """

    def setup_network(self):
        '''
        We'll setup the network to have 3 nodes that all mine with different parameters.
        But first we need to use one node to create a lot of small low priority outputs
        which we will use to generate our transactions.
        '''
        self.nodes = []
        # Use node0 to mine blocks for input splitting
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
                          "-relaypriority=0", "-whitelist=127.0.0.1"]))
        print("This test is time consuming, please be patient")
        print("Splitting inputs to small size so we can generate low priority tx's")
        self.txouts = []
        self.txouts2 = []
        # Split a coinbase into two transaction puzzle outputs
        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
        # Mine
        while (len(self.nodes[0].getrawmempool()) > 0):
            self.nodes[0].generate(1)
        # Repeatedly split those 2 outputs, doubling twice for each rep
        # Use txouts to monitor the available utxo, since these won't be tracked in wallet
        reps = 0
        while (reps < 5):
            #Double txouts to txouts2
            while (len(self.txouts)>0):
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            #Double txouts2 to txouts
            while (len(self.txouts2)>0):
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            reps += 1
        print("Finished splitting")
        # Now we can connect the other nodes, didn't want to connect them earlier
        # so the estimates would not be affected by the splitting transactions
        # Node1 mines small blocks but that are bigger than the expected transaction rate,
        # and allows free transactions.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
        # (17k is room enough for 110 or so transactions)
        self.nodes.append(start_node(1, self.options.tmpdir,
                          ["-blockprioritysize=1500", "-blockmaxsize=17000",
                           "-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
        connect_nodes(self.nodes[1], 0)
        # Node2 is a stingy miner, that
        # produces too small blocks (room for only 55 or so transactions)
        node2args = ["-blockprioritysize=0", "-blockmaxsize=8000", "-maxorphantx=1000", "-relaypriority=0"]
        self.nodes.append(start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[2], 1)
        self.is_network_split = False
        self.sync_all()

    def transact_and_mine(self, numblocks, mining_node):
        """Mine ``numblocks`` blocks on ``mining_node`` while generating on
        average 100 random-fee transactions per block, recording the fee
        rate paid per kB and tracking which outputs get confirmed."""
        min_fee = Decimal("0.00001")
        # We will now mine numblocks blocks generating on average 100 transactions between each block
        # We shuffle our confirmed txout set before each set of transactions
        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
        # resorting to tx's that depend on the mempool when those run out
        for i in range(numblocks):
            random.shuffle(self.confutxo)
            for j in range(random.randrange(100-50,100+50)):
                from_index = random.randint(1,2)
                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
                                                      self.memutxo, Decimal("0.005"), min_fee, min_fee)
                tx_kbytes = (len(txhex)/2)/1000.0
                self.fees_per_kb.append(float(fee)/tx_kbytes)
            sync_mempools(self.nodes[0:3],.1)
            mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
            sync_blocks(self.nodes[0:3],.1)
            #update which txouts are confirmed
            newmem = []
            for utx in self.memutxo:
                if utx["txid"] in mined:
                    self.confutxo.append(utx)
                else:
                    newmem.append(utx)
            self.memutxo = newmem

    def run_test(self):
        """Run two rounds of undersized-block then adequate-block mining,
        checking fee estimates after each phase, and once more after the
        mempools have been emptied."""
        self.fees_per_kb = []
        self.memutxo = []
        self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
        print("Will output estimates for 1/2/3/6/15/25 blocks")
        # BUG FIX: was xrange(), which only exists on Python 2; range()
        # behaves identically here and keeps the script runnable on Python 3.
        for i in range(2):
            print("Creating transactions and mining them with a block size that can't keep up")
            # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
            self.transact_and_mine(10, self.nodes[2])
            check_estimates(self.nodes[1], self.fees_per_kb, 14)
            print("Creating transactions and mining them at a block size that is just big enough")
            # Generate transactions while mining 10 more blocks, this time with node1
            # which mines blocks with capacity just above the rate that transactions are being created
            self.transact_and_mine(10, self.nodes[1])
            check_estimates(self.nodes[1], self.fees_per_kb, 2)
        # Finish by mining a normal-sized block:
        while len(self.nodes[1].getrawmempool()) > 0:
            self.nodes[1].generate(1)
        sync_blocks(self.nodes[0:3],.1)
        print("Final estimates after emptying mempools")
        check_estimates(self.nodes[1], self.fees_per_kb, 2)
| |
# -*- coding: utf-8 -*-
# pylint: disable-msg=too-few-public-methods, redefined-outer-name, no-self-use
"""This file contains the classes used to perform integration tests on the
methods in the SoCo class. They access a real Sonos system.
PLEASE TAKE NOTE: All of these tests are designed to run on a Sonos system
without interfering with normal service. This means that they must not raise
the volume or must leave the player in the same state as they found it in. They
have been made this way since SoCo is developed by volunteers who in all
likelihood do not have a dedicated test system. Accordingly the tests must not
annoy the neighbors, and should return the system to its original state so that
the developers can listen to their music while coding, without having it
interrupted at every unit test!
PLEASE RESPECT THIS.
"""
from __future__ import unicode_literals
import time
import pytest
import soco as soco_module
from soco.data_structures import (
DidlMusicTrack,
DidlPlaylistContainer,
SearchResult,
)
from soco.music_library import MusicLibrary
from soco.exceptions import SoCoUPnPException
# Mark all tests in this module with the pytest custom "integration" marker so
# they can be selected or deselected as a whole, eg:
# py.test -m "integration"
# or
# py.test -m "no integration"
pytestmark = pytest.mark.integration
@pytest.yield_fixture(scope='session')
def soco(request):
    """Set up and tear down the soco fixture used by all tests."""
    # The device is created from the ip given on the command line; a single
    # instance is shared by the whole session (scope='session').
    ip = request.config.option.IP
    if ip is None:
        pytest.fail("No ip address specified. Use the --ip option.")
    soco = soco_module.SoCo(ip)
    # The tests require a playing unit with a non-empty queue
    if len(soco.get_queue()) == 0:
        pytest.fail('Integration tests on the SoCo class must be run '
                    'with at least 1 item in the playlist.')
    transport_info = soco.get_current_transport_info()
    if transport_info['current_transport_state'] != 'PLAYING':
        pytest.fail('Integration tests on the SoCo class must be run '
                    'with the Sonos unit playing.')
    # Remember the queue and playback position so they can be restored
    saved_queue = soco.get_queue(0, 1000)
    saved_track_info = soco.get_current_track_info()
    yield soco
    # Tear down: rebuild the queue and resume the saved track and position
    soco.stop()
    soco.clear_queue()
    for track in saved_queue:
        soco.add_to_queue(track)
    soco.play_from_queue(int(saved_track_info['playlist_position']) - 1)
    soco.seek(saved_track_info['position'])
    soco.play()
def wait(interval=0.1):
    """Pause test execution; a single knob for tuning every test sleep."""
    time.sleep(interval)
class TestVolume(object):
    """Integration tests for the volume property."""

    # Volume is a percentage: 0..100 inclusive
    valid_values = range(101)

    @pytest.yield_fixture(autouse=True)
    def restore_volume(self, soco):
        """A fixture which restores volume after each test in the class is
        run."""
        old = soco.volume
        yield
        soco.volume = old
        wait()

    def test_get_and_set(self, soco):
        """Test if the set functionality works when given valid arguments."""
        old = soco.volume
        assert old in self.valid_values
        # Nudge away from the current value (up if already at the minimum)
        if old == self.valid_values[0]:
            new = old + 1
        else:
            new = old - 1
        soco.volume = new
        wait()
        assert soco.volume == new

    def test_invalid_arguments(self, soco):
        """Test if the set functionality coerces into range when given integers
        outside of allowed range."""
        # NOTE We don't test coerce from too large values, since that would
        # put the unit at full volume
        soco.volume = self.valid_values[0] - 1
        wait()
        assert soco.volume == 0

    def test_set_0(self, soco):
        """Test whether the volume can be set to 0. Regression test for:
        https://github.com/rahims/soco/issues/29
        """
        # BUG FIX: this method previously lacked the ``soco`` parameter, so
        # it assigned ``volume`` on the module-level fixture *function*
        # instead of the device, and the assertion never touched the unit.
        soco.volume = 0
        wait()
        assert soco.volume == 0
class TestBass(object):
    """Integration tests for the bass property.

    This class implements a full boundary value test.
    """

    valid_values = range(-10, 11)

    @pytest.yield_fixture(autouse=True)
    def restore_bass(self, soco):
        """Restore the bass EQ setting once each test in this class is done."""
        saved = soco.bass
        yield
        soco.bass = saved
        wait()

    def test_get_and_set(self, soco):
        """Set the boundary values of the valid range and read them back."""
        assert soco.bass in self.valid_values
        # Values on the boundaries of the valid equivalence partition
        lower, upper = self.valid_values[0], self.valid_values[-1]
        for value in (lower, upper):
            soco.bass = value
            wait()
            assert soco.bass == value

    def test_invalid_arguments(self, soco):
        """Out-of-range values should be coerced onto the nearest boundary."""
        # Values on the boundaries of the two invalid equivalence partitions
        lower, upper = self.valid_values[0], self.valid_values[-1]
        soco.bass = lower - 1
        wait()
        assert soco.bass == lower
        soco.bass = upper + 1
        wait()
        assert soco.bass == upper
class TestTreble(object):
    """Integration tests for the treble property.

    This class implements a full boundary value test.
    """

    valid_values = range(-10, 11)

    @pytest.yield_fixture(autouse=True)
    def restore_treble(self, soco):
        """Put the treble EQ back the way it was after every test here."""
        previous = soco.treble
        yield
        soco.treble = previous
        wait()

    def test_get_and_set(self, soco):
        """Setting the extreme valid values should round-trip exactly."""
        assert soco.treble in self.valid_values
        # Boundaries of the valid equivalence partition
        for boundary in [min(self.valid_values), max(self.valid_values)]:
            soco.treble = boundary
            wait()
            assert soco.treble == boundary

    def test_invalid_arguments(self, soco):
        """Values just outside the range should be coerced back into it."""
        # Boundaries of the two invalid equivalence partitions
        soco.treble = min(self.valid_values) - 1
        wait()
        assert soco.treble == min(self.valid_values)
        soco.treble = max(self.valid_values) + 1
        wait()
        assert soco.treble == max(self.valid_values)
class TestMute(object):
    """Integration test for the mute method."""

    def test(self, soco):
        """Toggle mute on and off, checking the state after each change."""
        initial = soco.mute
        assert initial is False, ('The unit should not be muted when running '
                                  'the unit tests.')
        soco.mute = True
        wait()
        assert soco.mute is True
        soco.mute = False
        wait()
        assert soco.mute is False
class TestGetCurrentTransportInfo(object):
    """Integration test for the get_current_transport_info method."""

    # The values in this list must be kept up to date with the values in
    # the test doc string
    transport_info_keys = sorted(['current_transport_status',
                                  'current_transport_state',
                                  'current_transport_speed'])

    def test(self, soco):
        """ Test if the return value is a dictionary that contains the keys:
        current_transport_status, current_transport_state,
        current_transport_speed and that values have been found for all keys,
        i.e. they are not None.
        """
        info = soco.get_current_transport_info()
        assert isinstance(info, dict)
        assert sorted(info.keys()) == self.transport_info_keys
        for value in info.values():
            assert value is not None
class TestTransport(object):
    """Integration tests for transport methods (play, pause etc)."""

    @staticmethod
    def _state(soco):
        """Return the unit's current transport state string."""
        return soco.get_current_transport_info()['current_transport_state']

    def test_pause_and_play(self, soco):
        """Pause the unit, check the state, then resume playback."""
        soco.pause()
        wait(1)
        assert self._state(soco) == 'PAUSED_PLAYBACK'
        soco.play()
        wait(1)
        assert self._state(soco) == 'PLAYING'

    def test_stop(self, soco):
        """Stop the unit, check the state, then resume playback."""
        soco.stop()
        wait(1)
        assert self._state(soco) == 'STOPPED'
        soco.play()
        wait(1)
        assert self._state(soco) == 'PLAYING'

    def test_seek_valid(self, soco):
        """Seek to the start of the track using both accepted time formats,
        restoring the original position afterwards."""
        original_position = soco.get_current_track_info()['position']
        for timestamp in ('0:00:00', '00:00:00'):
            soco.seek(timestamp)
            wait()
            position = soco.get_current_track_info()['position']
            assert position in ['0:00:00', '0:00:01']
            soco.seek(original_position)
        wait()

    def test_seek_invald(self, soco):
        """Test if the seek method properly fails with invalid input."""
        # (The "invald" typo is kept so existing test ids stay stable.)
        for bad_input in ['invalid_time_string', '5:12', '6', 'aa:aa:aa']:
            with pytest.raises(ValueError):
                soco.seek(bad_input)
class TestGetCurrentTrackInfo(object):
    """Integration test for the get_current_track_info method."""

    info_keys = sorted(['album', 'artist', 'title', 'uri', 'metadata',
                        'playlist_position', 'duration', 'album_art',
                        'position'])

    def test_get(self, soco):
        """Check that the return value is a dictionary containing exactly the
        keys: album, artist, title, uri, metadata, playlist_position,
        duration, album_art and position.
        """
        track_info = soco.get_current_track_info()
        assert isinstance(track_info, dict)
        assert sorted(track_info.keys()) == self.info_keys
class TestGetSpeakerInfo(object):
    """Integration test for the get_speaker_info method."""

    # The values in this list must be kept up to date with the values in
    # the test doc string
    info_keys = sorted(['zone_name', 'zone_icon', 'uid',
                        'serial_number', 'software_version',
                        'hardware_version', 'mac_address'])

    def test(self, soco):
        """ Test if the return value is a dictionary that contains the keys:
        zone_name, zone_icon, uid, serial_number, software_version,
        hardware_version, mac_address
        and that values have been found for all keys, i.e. they are not None.
        """
        speaker_info = soco.get_speaker_info()
        assert isinstance(speaker_info, dict)
        for value in speaker_info.values():
            assert value is not None
# TODO: test GetSpeakersIp
class TestGetQueue(object):
    """Integration test for the get_queue method."""

    # The values in this list must be kept up to date with the values in
    # the test doc string
    queue_element_keys = sorted(['album', 'creator', 'resources',
                                 'album_art_uri', 'title'])

    def test_get(self, soco):
        """Check that the return value is a list of DidlMusicTracks and that
        each element carries the attributes: album, creator, resources,
        album_art_uri and title.
        """
        queue = soco.get_queue(0, 100)
        assert isinstance(queue, list)
        for track in queue:
            assert isinstance(track, DidlMusicTrack)
            for attribute in self.queue_element_keys:
                assert getattr(track, attribute)
class TestAddToQueue(object):
    """Integration test for the add_to_queue method."""

    def test_add_to_queue(self, soco):
        """Re-add the final queue item and verify that the queue grew by one
        and now ends with two identical tracks."""
        old_queue = soco.get_queue(0, 1000)
        # add_to_queue returns the (1-based) position of the added item
        new_position = soco.add_to_queue(old_queue[-1])
        assert new_position == len(old_queue) + 1
        wait()
        new_queue = soco.get_queue()
        assert len(new_queue) == len(old_queue) + 1
        assert new_queue[-1].title == new_queue[-2].title
class TestRemoveFromQueue(object):
    """Integration test for the remove_from_queue method."""

    def test(self, soco):
        """Remove the final queue item and verify the queue shrank by one."""
        old_queue = soco.get_queue()
        # queue index is 0 based, so this targets the last element
        soco.remove_from_queue(len(old_queue) - 1)
        wait()
        new_queue = soco.get_queue()
        assert old_queue != new_queue, (
            'No difference between '
            'queues before and after removing the last item')
        assert len(new_queue) == len(old_queue) - 1
class TestSonosPlaylist(object):
    """Integration tests for Sonos Playlist Management."""

    existing_playlists = None
    playlist_name = 'zSocoTestPlayList42'

    @pytest.yield_fixture(autouse=True)
    def restore_sonos_playlists(self, soco):
        """A fixture which cleans up after each sonos playlist test."""
        if self.existing_playlists is None:
            # BUG FIX: cache on the class, not the instance. pytest builds a
            # fresh instance per test, so an instance attribute was discarded
            # immediately and the playlists were re-fetched for every test
            # (the sibling TestReorderSonosPlaylist already uses the class).
            self.__class__.existing_playlists = soco.get_sonos_playlists()
            if self.playlist_name in [x.title
                                      for x in self.existing_playlists]:
                msg = '%s is an existing playlist.' % self.playlist_name
                pytest.fail(msg)
        yield
        # Remove any playlist the test may have left behind
        for sonos_playlist in soco.get_sonos_playlists():
            if sonos_playlist.title == self.playlist_name:
                soco.remove_sonos_playlist(sonos_playlist=sonos_playlist)

    def test_create(self, soco):
        """Test creating a new empty Sonos playlist."""
        existing_playlists = {x.item_id for x in soco.get_sonos_playlists()}
        new_playlist = soco.create_sonos_playlist(title=self.playlist_name)
        assert type(new_playlist) is DidlPlaylistContainer
        # Exactly one new playlist id should have appeared
        new_pl = {x.item_id for x in soco.get_sonos_playlists()}
        assert new_pl != existing_playlists
        assert new_pl - existing_playlists == {new_playlist.item_id}

    def test_create_from_queue(self, soco):
        """Test creating a Sonos playlist from the current queue."""
        playlist = soco.create_sonos_playlist_from_queue(self.playlist_name)
        assert type(playlist) is DidlPlaylistContainer
        prslt = soco.music_library.browse(ml_item=playlist)
        qrslt = soco.get_queue()
        assert len(prslt) == len(qrslt)
        assert prslt.total_matches == qrslt.total_matches
        assert prslt.number_returned == qrslt.number_returned
        # compare uri because item_id is different, SQ:xx/n for playlist
        for p_item, q_item in zip(prslt, qrslt):
            assert p_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_playlist(self, soco):
        """Test removing a Sonos playlist."""
        # a place holder, remove_sonos_playlist is exercised in the
        # 'restore_sonos_playlists'
        pass

    def test_remove_playlist_itemid(self, soco):
        """Test removing a Sonos playlist by item_id."""
        new_playlist = soco.create_sonos_playlist(title=self.playlist_name)
        assert type(new_playlist) is DidlPlaylistContainer
        assert soco.remove_sonos_playlist(new_playlist.item_id)
        found = False
        for sonos_playlist in soco.get_sonos_playlists():
            if sonos_playlist.title == self.playlist_name:
                found = True
                break
        assert found is False, "new_playlist was not removed by item_id"

    def test_remove_playlist_bad_id(self, soco):
        """Test attempting to remove a Sonos playlist using a bad id."""
        # junky bad
        with pytest.raises(SoCoUPnPException):
            soco.remove_sonos_playlist('SQ:-7')
        # realistic non-existing: one past the highest existing playlist id
        hpl_i = max([int(x.item_id.split(':')[1])
                     for x in soco.get_sonos_playlists()])
        with pytest.raises(SoCoUPnPException):
            soco.remove_sonos_playlist('SQ:{}'.format(hpl_i + 1))
class TestTimer(object):
    """Integration tests for timers on Sonos"""

    existing_timer = None

    @pytest.yield_fixture(autouse=True)
    def restore_timer(self, soco):
        """A fixture which cleans up after each timer test."""
        existing_timer = soco.get_sleep_timer()
        yield
        soco.set_sleep_timer(existing_timer)

    def test_get_set_timer(self, soco):
        """Test setting the timer"""
        assert soco.set_sleep_timer(7200) is None
        result = soco.get_sleep_timer()
        # Allow a couple of seconds of drift between set and get
        if result not in (7200, 7199, 7198):
            # BUG FIX: the message previously interpolated
            # result['RemainingSleepTimerDuration'], but ``result`` is the
            # plain remaining-seconds value compared against ints above, so
            # subscripting it raised TypeError and masked the real failure.
            pytest.fail(
                "Set timer to 7200, but sonos reports back time as %s"
                % result)
class TestReorderSonosPlaylist(object):
"""Integration tests for Sonos Playlist Management."""
existing_playlists = None
playlist_name = 'zSocoTestPlayList42'
test_playlist = None
queue_length = None
@pytest.yield_fixture(autouse=True, scope="class")
def restore_sonos_playlists(self, soco):
"""A fixture which cleans up after each sonos playlist test."""
if self.existing_playlists is None:
self.existing_playlists = soco.get_sonos_playlists()
if self.playlist_name in [x.title for x in self.existing_playlists]:
msg = '%s is an existing playlist.' % self.playlist_name
pytest.fail(msg)
queue_list = soco.get_queue()
if len(queue_list) < 2:
msg = 'You must have 3 or more items in your queue for testing.'
pytest.fail(msg)
playlist = soco.create_sonos_playlist_from_queue(self.playlist_name)
self.__class__.queue_length = soco.queue_size
self.__class__.test_playlist = playlist
yield
soco.contentDirectory.DestroyObject(
[('ObjectID', self.test_playlist.item_id)]
)
def _reset_spl_contents(self, soco):
"""Ensure test playlist matches queue for each test."""
soco.contentDirectory.DestroyObject(
[('ObjectID', self.test_playlist.item_id)]
)
playlist = soco.create_sonos_playlist_from_queue(self.playlist_name)
self.__class__.test_playlist = playlist
return playlist, self.__class__.queue_length
def test_reverse_track_order(self, soco):
"""Test reversing the tracks in the Sonos playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
tracks = ','.join([str(x) for x in reversed(range(num_tracks))])
new_pos = ','.join([str(x) for x in range(num_tracks)])
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == 0
assert response['length'] == num_tracks
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
for s_item, q_item in zip(spl, reversed(soco.get_queue())):
assert s_item.resources[0].uri == q_item.resources[0].uri
def test_swap_first_two_items(self, soco):
"""Test a use case in doc string. Swapping the positions of the first
two tracks in the Sonos playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
tracks = [0, ]
new_pos = [1, ]
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == 0
assert response['length'] == num_tracks
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
que = soco.get_queue()
assert spl[0].resources[0].uri == que[1].resources[0].uri
assert spl[1].resources[0].uri == que[0].resources[0].uri
# FIXME remove the list on spl and que before slicing, when
# the deprecated __getitem__ on ListOfMusicInfoItems is
# removed
for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
assert s_item.resources[0].uri == q_item.resources[0].uri
def test_remove_first_track(self, soco):
"""Test removing first track from Sonos Playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
tracks = [0, ]
new_pos = [None, ]
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1
assert response['length'] == num_tracks - 1
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
# FIXME remove the list on queue() call, when the deprecated
# __getitem__ on ListOfMusicInfoItems is removed
que = list(soco.get_queue())[1:]
for s_item, q_item in zip(spl, que):
assert s_item.resources[0].uri == q_item.resources[0].uri
    def test_remove_first_track_full(self, soco):
        """Test removing first track from Sonos Playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        # First pair removes track 0; the remaining pairs re-pin every other
        # track to its (shifted) position.
        tracks = [0] + list(range(num_tracks - 1))  # [0, 0, 1, ..., n-2]
        new_pos = [None, ] + list(range(num_tracks - 1))  # [None, 0, ..., n-2]
        args = {'sonos_playlist': test_playlist.item_id,
                'tracks': tracks,
                'new_pos': new_pos}
        response = soco.reorder_sonos_playlist(**args)
        assert response['change'] == -1
        assert response['length'] == num_tracks - 1
        assert response['update_id'] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        # FIXME remove the list on queue() call, when the deprecated
        # __getitem__ on ListOfMusicInfoItems is removed
        que = list(soco.get_queue())[1:]
        for s_item, q_item in zip(spl, que):
            assert s_item.resources[0].uri == q_item.resources[0].uri
def test_remove_last_track(self, soco):
"""Test removing last track from Sonos Playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
tracks = range(num_tracks)
new_pos = list(range(num_tracks - 1)) + [None, ]
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1
assert response['length'] == num_tracks - 1
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
# FIXME remove the list on queue() call, when the deprecated
# __getitem__ on ListOfMusicInfoItems is removed
que = list(soco.get_queue())[:-1]
for s_item, q_item in zip(spl, que):
assert s_item.resources[0].uri == q_item.resources[0].uri
def test_remove_between_track(self, soco):
"""Test removing a middle track from Sonos Playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
ndx = int(num_tracks / 2)
tracks = [ndx]
new_pos = [None]
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1
assert response['length'] == num_tracks - 1
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
que = soco.get_queue()
del que[ndx]
for s_item, q_item in zip(spl, que):
assert s_item.resources[0].uri == q_item.resources[0].uri
def test_remove_some_tracks(self, soco): # pylint: disable=R0914
"""Test removing some tracks from Sonos Playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
# get rid of the even numbered tracks
tracks = sorted([x for x in range(num_tracks) if not x & 1],
reverse=True)
new_pos = [None for _ in tracks]
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1 * len(new_pos)
assert response['length'] == num_tracks + response['change']
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
que = soco.get_queue()
for ndx in tracks:
del que[ndx]
for s_item, q_item in zip(spl, que):
assert s_item.resources[0].uri == q_item.resources[0].uri
def test_remove_all_tracks(self, soco):
"""Test removing all tracks from Sonos Playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
# get rid of the even numbered tracks
tracks = sorted(range(num_tracks), reverse=True)
new_pos = [None for _ in tracks]
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1 * num_tracks
assert response['length'] == num_tracks + response['change']
assert response['length'] == 0
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
assert len(spl) == 0
def test_reorder_and_remove_track(self, soco):
"""Test reorder and removing a track from Sonos Playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
tracks = [1, 2]
new_pos = [0, None]
args = {'sonos_playlist': test_playlist.item_id,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1
assert response['length'] == num_tracks + response['change']
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
que = soco.get_queue()
assert spl[0].resources[0].uri == que[1].resources[0].uri
def test_object_id_is_object(self, soco):
"""Test removing all tracks from Sonos Playlist."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
tracks = sorted(range(num_tracks), reverse=True)
new_pos = [None for _ in tracks]
args = {'sonos_playlist': test_playlist,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1 * num_tracks
assert response['length'] == num_tracks + response['change']
assert response['length'] == 0
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
assert len(spl) == 0
def test_remove_all_string(self, soco):
"""Remove all in one op by using strings."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
# we know what we are doing
tracks = ','.join([str(x) for x in range(num_tracks)])
new_pos = ''
args = {'sonos_playlist': test_playlist,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1 * num_tracks
assert response['length'] == num_tracks + response['change']
assert response['length'] == 0
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
assert len(spl) == 0
def test_remove_and_reorder_string(self, soco):
"""test remove then reorder using string arguments."""
test_playlist, num_tracks = self._reset_spl_contents(soco)
tracks = '0,2' # trackA, trackB, trackC, ...
new_pos = ',0' # trackC, trackB, ...
args = {'sonos_playlist': test_playlist,
'tracks': tracks,
'new_pos': new_pos}
response = soco.reorder_sonos_playlist(**args)
assert response['change'] == -1
assert response['length'] == num_tracks + response['change']
assert response['update_id'] != 0
spl = soco.music_library.browse(ml_item=test_playlist)
que = soco.get_queue()
assert spl[0].resources[0].uri == que[2].resources[0].uri
assert spl[1].resources[0].uri == que[1].resources[0].uri
def test_move_track_string(self, soco):
    """Test a simple move with strings."""
    test_playlist, num_tracks = self._reset_spl_contents(soco)
    # Swap the first two tracks using the string argument form.
    tracks = "0"
    new_pos = "1"
    args = {'sonos_playlist': test_playlist.item_id,
            'tracks': tracks,
            'new_pos': new_pos}
    response = soco.reorder_sonos_playlist(**args)
    # A pure move: no change in length.
    assert response['change'] == 0
    assert response['length'] == num_tracks
    assert response['update_id'] != 0
    spl = soco.music_library.browse(ml_item=test_playlist)
    que = soco.get_queue()
    # First two entries are swapped relative to the queue ...
    assert spl[0].resources[0].uri == que[1].resources[0].uri
    assert spl[1].resources[0].uri == que[0].resources[0].uri
    # FIXME remove the list on spl and que before slicing, when
    # the deprecated __getitem__ on ListOfMusicInfoItems is
    # removed
    # ... and the rest are unchanged.
    for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
        assert s_item.resources[0].uri == q_item.resources[0].uri

def test_move_track_int(self, soco):
    """Test a simple move with ints."""
    test_playlist, num_tracks = self._reset_spl_contents(soco)
    # Same swap as test_move_track_string, via the int argument form.
    tracks = 1
    new_pos = 0
    args = {'sonos_playlist': test_playlist.item_id,
            'tracks': tracks,
            'new_pos': new_pos}
    response = soco.reorder_sonos_playlist(**args)
    assert response['change'] == 0
    assert response['length'] == num_tracks
    assert response['update_id'] != 0
    spl = soco.music_library.browse(ml_item=test_playlist)
    que = soco.get_queue()
    assert spl[0].resources[0].uri == que[1].resources[0].uri
    assert spl[1].resources[0].uri == que[0].resources[0].uri
    # FIXME remove the list on spl and que before slicing, when
    # the deprecated __getitem__ on ListOfMusicInfoItems is
    # removed
    for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
        assert s_item.resources[0].uri == q_item.resources[0].uri
def test_clear_sonos_playlist(self, soco):
    """Test the clear_sonos_playlist helper function."""
    test_playlist, num_tracks = self._reset_spl_contents(soco)
    response = soco.clear_sonos_playlist(test_playlist)
    # Clearing removes every track in one operation.
    assert response['change'] == -1 * num_tracks
    assert response['length'] == num_tracks + response['change']
    assert response['length'] == 0
    assert response['update_id'] != 0
    spl = soco.music_library.browse(ml_item=test_playlist)
    assert len(spl) == 0

def test_clear_empty_sonos_playlist(self, soco):
    """Test clearing an already empty Sonos playlist."""
    test_playlist, _ = self._reset_spl_contents(soco)
    response = soco.clear_sonos_playlist(test_playlist)
    assert response['length'] == 0
    update_id = response['update_id']
    # Clearing again with the current update_id must be a no-op:
    # nothing changes, and the update_id is not bumped.
    new_response = soco.clear_sonos_playlist(test_playlist,
                                             update_id=update_id)
    assert new_response['change'] == 0
    assert new_response['length'] == 0
    assert new_response['update_id'] == update_id
def test_move_in_sonos_playlist(self, soco):
    """Test method move_in_sonos_playlist."""
    test_playlist, num_tracks = self._reset_spl_contents(soco)
    # Move the first track to position 1 (i.e. swap tracks 0 and 1).
    args = {'sonos_playlist': test_playlist.item_id,
            'track': 0,
            'new_pos': 1}
    response = soco.move_in_sonos_playlist(**args)
    # A move never changes the playlist length.
    assert response['change'] == 0
    assert response['length'] == num_tracks
    assert response['update_id'] != 0
    spl = soco.music_library.browse(ml_item=test_playlist)
    que = soco.get_queue()
    # First two entries swapped relative to the untouched queue.
    assert spl[0].resources[0].uri == que[1].resources[0].uri
    assert spl[1].resources[0].uri == que[0].resources[0].uri
    # FIXME remove the list on spl and que before slicing, when
    # the deprecated __getitem__ on ListOfMusicInfoItems is
    # removed
    for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
        assert s_item.resources[0].uri == q_item.resources[0].uri

def test_remove_from_sonos_playlist(self, soco):
    """Test remove_from_sonos_playlist method."""
    test_playlist, num_tracks = self._reset_spl_contents(soco)
    args = {'sonos_playlist': test_playlist.item_id,
            'track': 0}
    response = soco.remove_from_sonos_playlist(**args)
    # Exactly one track removed.
    assert response['change'] == -1
    assert response['length'] == num_tracks - 1
    assert response['update_id'] != 0
    spl = soco.music_library.browse(ml_item=test_playlist)
    # FIXME remove the list on queue() call, when the deprecated
    # __getitem__ on ListOfMusicInfoItems is removed
    # Skip the queue's first item (the removed track) and verify
    # the remainder lines up with the playlist.
    que = list(soco.get_queue())[1:]
    for s_item, q_item in zip(spl, que):
        assert s_item.resources[0].uri == q_item.resources[0].uri
def test_get_sonos_playlist_by_attr(self, soco):
    """Test test_get_sonos_playlist_by_attr."""
    test_playlist, _ = self._reset_spl_contents(soco)
    # Lookup succeeds by title and by item_id ...
    by_name = soco.get_sonos_playlist_by_attr('title', self.playlist_name)
    assert test_playlist.item_id == by_name.item_id
    by_id = soco.get_sonos_playlist_by_attr('item_id',
                                            test_playlist.item_id)
    assert test_playlist.item_id == by_id.item_id
    # ... an unknown attribute raises AttributeError, and a valid
    # attribute with no matching value raises ValueError.
    with pytest.raises(AttributeError):
        soco.get_sonos_playlist_by_attr('fred', 'wilma')
    with pytest.raises(ValueError):
        soco.get_sonos_playlist_by_attr('item_id', 'wilma')
class TestMusicLibrary(object):
    """Test the music library methods."""

    # Every search type the library knows how to translate.
    search_types = list(MusicLibrary.SEARCH_TRANSLATION.keys())
    # Search types that also have a dedicated get_<name>() method.
    specific_search_methods = (
        "artists", "album_artists", "albums", "genres", "composers", "tracks",
        "playlists", "sonos_favorites", "favorite_radio_stations",
        "favorite_radio_shows",
    )

    @pytest.mark.parametrize("search_type", specific_search_methods)
    def test_from_specific_search_methods(self, soco, search_type):
        """Test getting favorites from the music library"""
        # Each specific search method must exist and return a
        # SearchResult (content is device-dependent, so only the
        # type is asserted).
        search_method = getattr(soco.music_library, "get_" + search_type)
        search_result = search_method()
        assert isinstance(search_result, SearchResult)

    @pytest.mark.parametrize("search_type", search_types)
    def test_music_library_information(self, soco, search_type):
        """Test getting favorites from the music library"""
        search_result = soco.music_library.get_music_library_information(search_type)
        assert isinstance(search_result, SearchResult)
| |
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import mock
from oslo_utils import importutils
import re
import testtools
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
config as cisco_config)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
constants as const)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_network_driver)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import constants
from networking_cisco.plugins.ml2.drivers.cisco.nexus import exceptions
from networking_cisco.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from networking_cisco.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from neutron.common import constants as n_const
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
from neutron.tests.unit import testlib_api
# Fixture data shared by the test classes below.

# Error text injected into the mocked ncclient connect call.
CONNECT_ERROR = 'Unable to connect to Nexus'

# Fake switch addresses: one plain switch, one for the port-channel
# test, one for the dual-interface test.
NEXUS_IP_ADDRESS = '1.1.1.1'
NEXUS_IP_ADDRESS_PC = '2.2.2.2'
NEXUS_IP_ADDRESS_DUAL = '3.3.3.3'

# Compute hosts wired to the fake switches.
HOST_NAME_1 = 'testhost1'
HOST_NAME_2 = 'testhost2'
HOST_NAME_PC = 'testpchost'
HOST_NAME_DUAL = 'testdualhost'

# VM instance ids used as port device_ids.
INSTANCE_1 = 'testvm1'
INSTANCE_2 = 'testvm2'
INSTANCE_PC = 'testpcvm'
INSTANCE_DUAL = 'testdualvm'

# Switch interfaces (type:port); NEXUS_DUAL lists two interfaces.
NEXUS_PORT_1 = 'ethernet:1/10'
NEXUS_PORT_2 = 'ethernet:1/20'
NEXUS_PORTCHANNELS = 'portchannel:2'
NEXUS_DUAL = 'ethernet:1/3,portchannel:2'

# VLAN/VXLAN segmentation ids; NO_VXLAN_ID marks a plain-VLAN config.
VLAN_ID_1 = 267
VLAN_ID_2 = 265
VLAN_ID_PC = 268
VLAN_ID_DUAL = 269
VXLAN_ID = 70000
NO_VXLAN_ID = 0
MCAST_GROUP = '255.1.1.1'

DEVICE_OWNER_COMPUTE = 'compute:test'
DEVICE_OWNER_DHCP = n_const.DEVICE_OWNER_DHCP
NEXUS_SSH_PORT = '22'
PORT_STATE = n_const.PORT_STATUS_ACTIVE
NETWORK_TYPE = 'vlan'
NEXUS_VXLAN_NETWORK_TYPE = 'nexus_vxlan'

# Dotted path of the driver class loaded via importutils in setUp.
NEXUS_DRIVER = ('networking_cisco.plugins.ml2.drivers.cisco.nexus.'
                'nexus_network_driver.CiscoNexusDriver')
class FakeNetworkContext(object):
    """Network context for testing purposes only.

    Minimal stand-in exposing just the segment dictionary the
    mechanism driver reads.
    """

    def __init__(self, segment_id, nw_type, mcast_group='physnet1'):
        segments = {}
        segments[api.SEGMENTATION_ID] = segment_id
        segments[api.NETWORK_TYPE] = nw_type
        segments[const.PROVIDER_SEGMENT] = False
        segments[api.PHYSICAL_NETWORK] = mcast_group
        self._network_segments = segments

    @property
    def network_segments(self):
        """Return the fake segment dictionary built in __init__."""
        return self._network_segments
class FakePortContext(object):
    """Port context for testing purposes only.

    Minimal stand-in for an ML2 PortContext: exposes the port dict,
    network context, and top/bottom bound segments that the Cisco
    mechanism driver inspects.
    """

    def __init__(self, device_id, host_name, device_owner,
                 network_context, bottom_segment=None):
        self._port = {
            'status': PORT_STATE,
            'device_id': device_id,
            'device_owner': device_owner,
            portbindings.HOST_ID: host_name,
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
        }
        self._network = network_context
        self._segment = network_context.network_segments
        self._bottom_segment = (None if bottom_segment is None
                                else bottom_segment.network_segments)

    @property
    def current(self):
        """Return the fake port dictionary."""
        return self._port

    @property
    def network(self):
        """Return the network context supplied at construction."""
        return self._network

    @property
    def top_bound_segment(self):
        """Return the segment dict of the (top) network context."""
        return self._segment

    @property
    def bottom_bound_segment(self):
        """Return the bottom segment dict, or None if not supplied."""
        return self._bottom_segment

    @property
    def original_top_bound_segment(self):
        # The fakes never model a pre-update ("original") binding.
        return None

    @property
    def original_bottom_bound_segment(self):
        return None
class TestCiscoNexusDevice(testlib_api.SqlTestCase):
    """Unit tests for Cisco ML2 Nexus device driver."""

    # One record per test scenario; fields mirror the arguments the
    # driver receives for a port event.
    TestConfigObj = collections.namedtuple(
        'TestConfigObj',
        'nexus_ip_addr host_name nexus_port instance_id vlan_id vxlan_id '
        'mcast_group device_owner')

    # Canned scenarios keyed by name. vxlan_id == NO_VXLAN_ID means a
    # plain VLAN config; only 'test_vxlan_config1' exercises VXLAN.
    test_configs = {
        'test_config1': TestConfigObj(
            NEXUS_IP_ADDRESS,
            HOST_NAME_1,
            NEXUS_PORT_1,
            INSTANCE_1,
            VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_config2': TestConfigObj(
            NEXUS_IP_ADDRESS,
            HOST_NAME_2,
            NEXUS_PORT_2,
            INSTANCE_2,
            VLAN_ID_2,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        # Same switch/port/vlan as test_config1 but a different
        # instance -- used by the duplicate-port test.
        'test_config3': TestConfigObj(
            NEXUS_IP_ADDRESS,
            HOST_NAME_1,
            NEXUS_PORT_1,
            INSTANCE_2,
            VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_config_portchannel': TestConfigObj(
            NEXUS_IP_ADDRESS_PC,
            HOST_NAME_PC,
            NEXUS_PORTCHANNELS,
            INSTANCE_PC,
            VLAN_ID_PC,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_config_dual': TestConfigObj(
            NEXUS_IP_ADDRESS_DUAL,
            HOST_NAME_DUAL,
            NEXUS_DUAL,
            INSTANCE_DUAL,
            VLAN_ID_DUAL,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_config_dhcp': TestConfigObj(
            NEXUS_IP_ADDRESS,
            HOST_NAME_1,
            NEXUS_PORT_1,
            INSTANCE_1,
            VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_DHCP),
        'test_vxlan_config1': TestConfigObj(
            NEXUS_IP_ADDRESS,
            HOST_NAME_1,
            NEXUS_PORT_1,
            INSTANCE_1,
            VLAN_ID_1,
            VXLAN_ID,
            '225.1.1.1',
            DEVICE_OWNER_COMPUTE),
    }
def setUp(self):
    """Sets up mock ncclient, and switch and credentials dictionaries."""
    super(TestCiscoNexusDevice, self).setUp()
    # Use a mock netconf client
    self.mock_ncclient = mock.Mock()
    # Patch the driver's ncclient import so no real netconf
    # connection is ever attempted.
    mock.patch.object(nexus_network_driver.CiscoNexusDriver,
                      '_import_ncclient',
                      return_value=self.mock_ncclient).start()
    # Make get() return an empty config document by default.
    data_xml = {'connect.return_value.get.return_value.data_xml': ''}
    self.mock_ncclient.configure_mock(**data_xml)

    def new_nexus_init(mech_instance):
        # Replacement __init__ for the mech driver: builds the
        # switch/credential dictionaries directly from test_configs
        # instead of reading a config file.
        mech_instance.driver = importutils.import_object(NEXUS_DRIVER)
        mech_instance.monitor_timeout = (
            cisco_config.cfg.CONF.ml2_cisco.switch_heartbeat_time)
        mech_instance._nexus_switches = {}
        for name, config in TestCiscoNexusDevice.test_configs.items():
            ip_addr = config.nexus_ip_addr
            host_name = config.host_name
            nexus_port = config.nexus_port
            mech_instance._nexus_switches[(ip_addr,
                                           host_name)] = nexus_port
            mech_instance._nexus_switches[(ip_addr,
                                           'ssh_port')] = NEXUS_SSH_PORT
            mech_instance._nexus_switches[(ip_addr,
                                           constants.USERNAME)] = 'admin'
            mech_instance._nexus_switches[(ip_addr,
                                           constants.PASSWORD)] = 'password'
        mech_instance.driver.nexus_switches = (
            mech_instance._nexus_switches)

    mock.patch.object(mech_cisco_nexus.CiscoNexusMechanismDriver,
                      '__init__', new=new_nexus_init).start()
    self._cisco_mech_driver = (mech_cisco_nexus.
                               CiscoNexusMechanismDriver())
def _verify_results(self, driver_result):
    """Verifies correct entries sent to Nexus.

    Checks that the mocked ncclient's edit_config was called exactly
    once per expected pattern, and that each call's 'config' payload
    matches (re.search) the corresponding regexp in driver_result,
    in order.
    """
    edit_config = self.mock_ncclient.connect.return_value.edit_config
    self.assertEqual(edit_config.call_count,
                     len(driver_result),
                     "Unexpected driver count")
    # enumerate() replaces the original xrange() loop: xrange does
    # not exist on Python 3 (NameError); range/enumerate behave the
    # same on both Python 2 and 3 here.
    for idx, expected in enumerate(driver_result):
        config = edit_config.mock_calls[idx][2]['config']
        self.assertIsNotNone(config, "mock_data is None")
        self.assertIsNotNone(re.search(expected, config),
                             "Expected result data not found")
def _create_port(self, port_config):
    """Tests creation of a virtual port.

    Drives update_port_precommit/postcommit with fake contexts built
    from port_config, then asserts one DB binding exists per switch
    interface.
    """
    nexus_ip_addr = port_config.nexus_ip_addr
    host_name = port_config.host_name
    nexus_port = port_config.nexus_port
    instance_id = port_config.instance_id
    vlan_id = port_config.vlan_id
    vxlan_id = port_config.vxlan_id
    mcast_group = port_config.mcast_group
    device_owner = port_config.device_owner
    network_context = FakeNetworkContext(vlan_id, NETWORK_TYPE)
    if vxlan_id != NO_VXLAN_ID:
        # VXLAN case: VLAN context becomes the bottom segment and a
        # VXLAN context sits on top.
        bottom_context = network_context
        network_context = FakeNetworkContext(vxlan_id,
            NEXUS_VXLAN_NETWORK_TYPE, mcast_group)
    else:
        bottom_context = None
    port_context = FakePortContext(instance_id, host_name,
        device_owner, network_context, bottom_context)
    self._cisco_mech_driver.update_port_precommit(port_context)
    self._cisco_mech_driver.update_port_postcommit(port_context)
    # NEXUS_DUAL style configs list several comma-separated
    # interfaces; each must get its own binding row.
    for port_id in nexus_port.split(','):
        bindings = nexus_db_v2.get_nexusport_binding(port_id,
                                                     vlan_id,
                                                     nexus_ip_addr,
                                                     instance_id)
        self.assertEqual(len(bindings), 1)

def _delete_port(self, port_config):
    """Tests deletion of a virtual port.

    Mirror of _create_port: drives delete_port_precommit/postcommit
    and asserts every binding row is gone afterwards.
    """
    nexus_ip_addr = port_config.nexus_ip_addr
    host_name = port_config.host_name
    nexus_port = port_config.nexus_port
    instance_id = port_config.instance_id
    vlan_id = port_config.vlan_id
    vxlan_id = port_config.vxlan_id
    mcast_group = port_config.mcast_group
    device_owner = port_config.device_owner
    network_context = FakeNetworkContext(vlan_id, NETWORK_TYPE)
    if vxlan_id != NO_VXLAN_ID:
        bottom_context = network_context
        network_context = FakeNetworkContext(vxlan_id,
            NEXUS_VXLAN_NETWORK_TYPE, mcast_group)
    else:
        bottom_context = None
    port_context = FakePortContext(instance_id, host_name,
        device_owner, network_context, bottom_context)
    self._cisco_mech_driver.delete_port_precommit(port_context)
    self._cisco_mech_driver.delete_port_postcommit(port_context)
    for port_id in nexus_port.split(','):
        # Lookup must now raise: the binding no longer exists.
        with testtools.ExpectedException(
                exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusport_binding(port_id,
                                              vlan_id,
                                              nexus_ip_addr,
                                              instance_id)

def _create_delete_port(self, port_config):
    """Tests creation and deletion of a virtual port."""
    self._create_port(port_config)
    self._delete_port(port_config)
def _config_side_effects(self, match_config, exc):
    """Generates config-dependent side effect for ncclient.

    This method was written to configure side_effects for both
    ncclient edit_config and get_config drivers. In the case
    of edit_config, the arguments target and config are passed
    into _side_effect_method. In the case of get, the argument
    filter is passed into _side_effect_method. For the sake of
    simplicity, the _side_effect_method was written to handle
    either case.
    """
    keywords = match_config.split()

    def _side_effect_method(target=None, config=None, filter=None):
        # edit_config supplies `config`; get supplies `filter`
        # (a (type, payload) pair -- payload is at index 1).
        payload = filter[1] if config is None else config
        if all(word in payload for word in keywords):
            raise exc

    return _side_effect_method
def _create_port_failure(self, attr, match_str, test_case, test_id):
    """Verifies exception handling during initial create object.

    This method is a shared method to initiate an exception
    at various point of object creation. The points of failure
    are identified by the caller which can be get operations or
    edit operations. When local replay mechanism is not configured,
    the exception should bubble up.

    attr: Which mock attribute to contain side_effect exception
    match_str: String for side_effect method to match for exception
    test_case: which configuration test case to run thru test
    test_id: String to put in the exception and verify contained
             in exception string. This indicates it was indeed
             our exception which was executed indicating test
             indeed is complete
    """
    config = {attr:
              self._config_side_effects(match_str,
                                        Exception(test_id))}
    self.mock_ncclient.configure_mock(**config)
    e = self.assertRaises(
        exceptions.NexusConfigFailed,
        self._create_port,
        TestCiscoNexusDevice.test_configs[test_case])
    # str() replaces unicode(): `unicode` is a NameError on
    # Python 3, and str() is equivalent for these ASCII test ids.
    self.assertIn(test_id, str(e))

def _delete_port_failure(self, attr, match_str, test_case, test_id):
    """Verifies exception handling during object deletion.

    This method is a shared method to initiate an exception
    at various point of object deletion. The points of failure
    are identified by the caller which can be get operations or
    edit operations. When local replay mechanism is not configured,
    the exception should bubble up.

    attr: Which mock attribute to contain side_effect exception
    match_str: String for side_effect method to match for exception
    test_case: which configuration test case to run thru test
    test_id: String to put in the exception and verify contained
             in exception string. This indicates it was indeed
             our exception which was executed indicating test
             indeed is complete
    """
    # The port must exist before its deletion can fail.
    self._create_port(
        TestCiscoNexusDevice.test_configs[test_case])
    config = {attr:
              self._config_side_effects(match_str,
                                        Exception(test_id))}
    self.mock_ncclient.configure_mock(**config)
    e = self.assertRaises(
        exceptions.NexusConfigFailed,
        self._delete_port,
        TestCiscoNexusDevice.test_configs[test_case])
    # str() replaces unicode() for Python 3 compatibility.
    self.assertIn(test_id, str(e))
def test_create_delete_ports(self):
    """Tests creation and deletion of two new virtual Ports."""
    self._create_delete_port(
        TestCiscoNexusDevice.test_configs['test_config1'])
    self._create_delete_port(
        TestCiscoNexusDevice.test_configs['test_config2'])

def test_create_delete_duplicate_ports(self):
    """Tests creation and deletion of two new virtual Ports.

    test_config1 and test_config3 share the same switch, interface
    and VLAN (they differ only in instance id), so the switch must
    be configured only once and deconfigured only when the last
    binding is removed.
    """
    # Regexps matched against the netconf XML sent on first add.
    duplicate_add_port_driver_result = [
        '\<vlan\-name\>q\-267\<\/vlan\-name>',
        '\<vstate\>active\<\/vstate>',
        '\<no\>\s+\<shutdown\/\>\s+\<\/no\>',
        '\<interface\>1\/10\<\/interface\>\s+'
        '[\x20-\x7e]+\s+\<switchport\>\s+\<trunk\>\s+'
        '\<allowed\>\s+\<vlan\>\s+\<vlan_id\>267',
    ]
    # Regexps matched against the XML sent on final delete.
    duplicate_delete_port_driver_result = [
        '\<interface\>1\/10\<\/interface\>\s+'
        '[\x20-\x7e\s]+\<switchport\>\s+\<trunk\>\s+'
        '\<allowed\>\s+\<vlan\>\s+\<remove\>\s+\<vlan\>267',
        '\<no\>\s+\<vlan\>\s+<vlan-id-create-delete\>'
        '\s+\<__XML__PARAM_value\>267',
    ]
    self._create_port(
        TestCiscoNexusDevice.test_configs['test_config1'])
    # verify first config was indeed configured
    self._verify_results(duplicate_add_port_driver_result)
    self._create_port(
        TestCiscoNexusDevice.test_configs['test_config3'])
    # verify only the first config was applied
    self._verify_results(duplicate_add_port_driver_result)
    # Verify there are 2 port configs
    bindings = nexus_db_v2.get_nexusvlan_binding(VLAN_ID_1,
                                                 NEXUS_IP_ADDRESS)
    self.assertEqual(len(bindings), 2)
    # Clean all the ncclient mock_calls so we can evaluate
    # results of delete operations.
    self.mock_ncclient.reset_mock()
    self._delete_port(
        TestCiscoNexusDevice.test_configs['test_config1'])
    # Using empty list verify no nexus action on first port removal
    self._verify_results([])
    self._delete_port(
        TestCiscoNexusDevice.test_configs['test_config3'])
    # verify port removed on 2nd port delete
    self._verify_results(duplicate_delete_port_driver_result)

def test_create_delete_portchannel(self):
    """Tests creation of a port over a portchannel."""
    self._create_delete_port(
        TestCiscoNexusDevice.test_configs['test_config_portchannel'])

def test_create_delete_dual(self):
    """Tests creation and deletion of dual ports for single server"""
    self._create_delete_port(
        TestCiscoNexusDevice.test_configs['test_config_dual'])

def test_create_delete_dhcp(self):
    """Tests creation and deletion of ports with device_owner of dhcp."""
    self._create_delete_port(
        TestCiscoNexusDevice.test_configs['test_config_dhcp'])
def test_connect_failure(self):
    """Verifies exception handling during ncclient connect.

    A failing connect must surface as NexusConfigFailed carrying the
    original error text.
    """
    config = {'connect.side_effect': Exception(CONNECT_ERROR)}
    self.mock_ncclient.configure_mock(**config)
    e = self.assertRaises(exceptions.NexusConfigFailed,
                          self._create_port,
                          TestCiscoNexusDevice.test_configs[
                              'test_config1'])
    # str() replaces unicode(): `unicode` is a NameError on
    # Python 3; str() is equivalent for this ASCII message.
    self.assertIn(CONNECT_ERROR, str(e))
# Each test below injects an exception into a specific mocked
# ncclient call (matched by keywords in the config payload) and
# verifies it bubbles up through create/delete as NexusConfigFailed.
# __name__ is used as the unique marker expected in the exception.

def test_get_interface_failure(self):
    """Verifies exception during ncclient get interface. """
    self._create_port_failure(
        'connect.return_value.get.side_effect',
        'show running-config interface ethernet',
        'test_config1',
        __name__)

def test_enable_vxlan_feature_failure(self):
    """Verifies exception during enable VXLAN driver. """
    # Set configuration variable to add/delete the VXLAN global nexus
    # switch values.
    cisco_config.cfg.CONF.set_override('vxlan_global_config', True,
                                       'ml2_cisco')
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'feature nv overlay vn-segment-vlan-based',
        'test_vxlan_config1',
        __name__)

def test_disable_vxlan_feature_failure(self):
    """Verifies exception during disable VXLAN driver. """
    # Set configuration variable to add/delete the VXLAN global nexus
    # switch values.
    cisco_config.cfg.CONF.set_override('vxlan_global_config', True,
                                       'ml2_cisco')
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'no feature nv overlay vn-segment-vlan-based',
        'test_vxlan_config1',
        __name__)

def test_create_nve_member_failure(self):
    """Verifies exception during create nve member driver. """
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'member vni mcast-group',
        'test_vxlan_config1',
        __name__)

def test_delete_nve_member_failure(self):
    """Verifies exception during delete nve member driver. """
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'no member vni',
        'test_vxlan_config1',
        __name__)

def test_create_vlan_failure(self):
    """Verifies exception during edit vlan create driver. """
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'vlan-id-create-delete vlan-name',
        'test_config1',
        __name__)

def test_delete_vlan_failure(self):
    """Verifies exception during edit vlan delete driver. """
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'vlan-id-create-delete no vlan 267',
        'test_config1',
        __name__)

def test_create_trunk_failure(self):
    """Verifies exception during create trunk interface driver. """
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'switchport trunk allowed vlan_id 267',
        'test_config1',
        __name__)

def test_delete_trunk_failure(self):
    """Verifies exception during delete trunk interface driver. """
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'switchport trunk allowed remove vlan 267',
        'test_config1',
        __name__)
# Fixture data for the replay test class (RP_ prefix). Host names
# describe the scenario each switch exercises: a unique port, two
# instances sharing a VLAN, and two instances sharing a port.
RP_NEXUS_IP_ADDRESS_1 = '1.1.1.1'
RP_NEXUS_IP_ADDRESS_2 = '2.2.2.2'
RP_NEXUS_IP_ADDRESS_3 = '3.3.3.3'
RP_HOST_NAME_1 = 'UniquePort'
RP_HOST_NAME_2 = 'DuplicateVlan'
RP_HOST_NAME_3 = 'DuplicatePort'
RP_INSTANCE_1 = 'testvm1'
RP_INSTANCE_2 = 'testvm2'
RP_NEXUS_PORT_1 = 'ethernet:1/10'
RP_NEXUS_PORT_2 = 'ethernet:1/20'
RP_VLAN_ID_1 = 267
RP_VLAN_ID_2 = 265
class TestCiscoNexusReplay(testlib_api.SqlTestCase):
    """Unit tests for Replay of Cisco ML2 Nexus data."""

    # Same record layout as TestCiscoNexusDevice.TestConfigObj.
    TestConfigObj = collections.namedtuple(
        'TestConfigObj',
        'nexus_ip_addr host_name nexus_port instance_id vlan_id vxlan_id '
        'mcast_group device_owner')

    # Replay scenarios in pairs: unique ports, duplicate VLAN on two
    # ports, duplicate port for two instances, plus one VXLAN case.
    test_configs = {
        'test_replay_unique1': TestConfigObj(
            RP_NEXUS_IP_ADDRESS_1,
            RP_HOST_NAME_1,
            RP_NEXUS_PORT_1,
            RP_INSTANCE_1,
            RP_VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_replay_unique2': TestConfigObj(
            RP_NEXUS_IP_ADDRESS_1,
            RP_HOST_NAME_1,
            RP_NEXUS_PORT_1,
            RP_INSTANCE_2,
            RP_VLAN_ID_2,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_replay_duplvlan1': TestConfigObj(
            RP_NEXUS_IP_ADDRESS_2,
            RP_HOST_NAME_2,
            RP_NEXUS_PORT_1,
            RP_INSTANCE_1,
            RP_VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_replay_duplvlan2': TestConfigObj(
            RP_NEXUS_IP_ADDRESS_2,
            RP_HOST_NAME_2,
            RP_NEXUS_PORT_2,
            RP_INSTANCE_2,
            RP_VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_replay_duplport1': TestConfigObj(
            RP_NEXUS_IP_ADDRESS_3,
            RP_HOST_NAME_3,
            RP_NEXUS_PORT_1,
            RP_INSTANCE_1,
            RP_VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_replay_duplport2': TestConfigObj(
            RP_NEXUS_IP_ADDRESS_3,
            RP_HOST_NAME_3,
            RP_NEXUS_PORT_1,
            RP_INSTANCE_2,
            RP_VLAN_ID_1,
            NO_VXLAN_ID,
            None,
            DEVICE_OWNER_COMPUTE),
        'test_replay_vxlan_unique1': TestConfigObj(
            RP_NEXUS_IP_ADDRESS_1,
            RP_HOST_NAME_1,
            RP_NEXUS_PORT_1,
            RP_INSTANCE_1,
            RP_VLAN_ID_1,
            VXLAN_ID,
            '225.1.1.1',
            DEVICE_OWNER_COMPUTE),
    }
def setUp(self):
    """Sets up mock ncclient, and switch and credentials dictionaries."""
    super(TestCiscoNexusReplay, self).setUp()
    # Use a mock netconf client
    self.mock_ncclient = mock.Mock()
    mock.patch.object(nexus_network_driver.CiscoNexusDriver,
                      '_import_ncclient',
                      return_value=self.mock_ncclient).start()
    data_xml = {'connect.return_value.get.return_value.data_xml': ''}
    self.mock_ncclient.configure_mock(**data_xml)
    # Enable the replay mechanism by giving it a heartbeat interval.
    cisco_config.cfg.CONF.set_override('switch_heartbeat_time',
                                       30, 'ml2_cisco')

    def new_nexus_init(mech_instance):
        # Replacement __init__: builds switch/credential maps from
        # test_configs. Unlike the non-replay class, several configs
        # share an (ip, host) key, so ports are merged into a
        # comma-separated list rather than overwritten.
        mech_instance.driver = importutils.import_object(NEXUS_DRIVER)
        mech_instance.monitor_timeout = (
            cisco_config.cfg.CONF.ml2_cisco.switch_heartbeat_time)
        mech_instance._switch_state = {}
        mech_instance._nexus_switches = {}
        for name, config in TestCiscoNexusReplay.test_configs.items():
            ip_addr = config.nexus_ip_addr
            host_name = config.host_name
            nexus_port = config.nexus_port
            if (ip_addr, host_name) in mech_instance._nexus_switches:
                saved_port = (
                    mech_instance._nexus_switches[(ip_addr, host_name)])
                if saved_port != nexus_port:
                    mech_instance._nexus_switches[(ip_addr, host_name)] = (
                        saved_port + ',' + nexus_port)
            else:
                mech_instance._nexus_switches[(ip_addr,
                                               host_name)] = nexus_port
            mech_instance._nexus_switches[(ip_addr,
                                           'ssh_port')] = NEXUS_SSH_PORT
            mech_instance._nexus_switches[(ip_addr,
                                           constants.USERNAME)] = 'admin'
            mech_instance._nexus_switches[(ip_addr,
                                           constants.PASSWORD)] = 'password'
        mech_instance.driver.nexus_switches = (
            mech_instance._nexus_switches)

    mock.patch.object(mech_cisco_nexus.CiscoNexusMechanismDriver,
                      '__init__', new=new_nexus_init).start()
    self._cisco_mech_driver = (mech_cisco_nexus.
                               CiscoNexusMechanismDriver())
    # The monitor object whose check_connections() triggers replay.
    self._cfg_monitor = (mech_cisco_nexus.
                         CiscoNexusCfgMonitor(
                             self._cisco_mech_driver.driver,
                             self._cisco_mech_driver))
def _create_port(self, port_config):
    """Tests creation of a virtual port."""
    nexus_ip_addr = port_config.nexus_ip_addr
    host_name = port_config.host_name
    nexus_port = port_config.nexus_port
    instance_id = port_config.instance_id
    vlan_id = port_config.vlan_id
    vxlan_id = port_config.vxlan_id
    mcast_group = port_config.mcast_group
    device_owner = port_config.device_owner
    network_context = FakeNetworkContext(vlan_id, NETWORK_TYPE)
    if vxlan_id != NO_VXLAN_ID:
        # NOTE(review): the VXLAN context is built with vlan_id as
        # its segment id, whereas TestCiscoNexusDevice._create_port
        # uses vxlan_id here -- confirm this is intentional.
        vxlan_network_context = FakeNetworkContext(vlan_id,
            NEXUS_VXLAN_NETWORK_TYPE, mcast_group)
        port_context = FakePortContext(instance_id, host_name,
            device_owner, vxlan_network_context, network_context)
    else:
        port_context = FakePortContext(instance_id, host_name,
            device_owner, network_context)
    self._cisco_mech_driver.update_port_precommit(port_context)
    self._cisco_mech_driver.update_port_postcommit(port_context)
    # One binding row must exist per comma-separated interface.
    for port_id in nexus_port.split(','):
        bindings = nexus_db_v2.get_nexusport_binding(port_id,
                                                     vlan_id,
                                                     nexus_ip_addr,
                                                     instance_id)
        self.assertEqual(len(bindings), 1)

def _delete_port(self, port_config):
    """Tests deletion of a virtual port."""
    nexus_ip_addr = port_config.nexus_ip_addr
    host_name = port_config.host_name
    nexus_port = port_config.nexus_port
    instance_id = port_config.instance_id
    vlan_id = port_config.vlan_id
    vxlan_id = port_config.vxlan_id
    mcast_group = port_config.mcast_group
    device_owner = port_config.device_owner
    network_context = FakeNetworkContext(vlan_id, NETWORK_TYPE)
    if vxlan_id != NO_VXLAN_ID:
        vxlan_network_context = FakeNetworkContext(vlan_id,
            NEXUS_VXLAN_NETWORK_TYPE, mcast_group)
        port_context = FakePortContext(instance_id, host_name,
            device_owner, vxlan_network_context, network_context)
    else:
        port_context = FakePortContext(instance_id, host_name,
            device_owner, network_context)
    self._cisco_mech_driver.delete_port_precommit(port_context)
    self._cisco_mech_driver.delete_port_postcommit(port_context)
    # All binding rows must be gone after the delete.
    for port_id in nexus_port.split(','):
        with testtools.ExpectedException(
                exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusport_binding(port_id,
                                              vlan_id,
                                              nexus_ip_addr,
                                              instance_id)
def _verify_replay_results(self, driver_result):
    """Verifies correct entries sent to Nexus.

    Checks that the mocked ncclient's edit_config was called exactly
    once per expected pattern, and that each call's 'config' payload
    matches (re.search) the corresponding regexp in driver_result,
    in order.
    """
    edit_config = self.mock_ncclient.connect.return_value.edit_config
    self.assertEqual(edit_config.call_count,
                     len(driver_result),
                     "Unexpected driver count")
    # enumerate() replaces the original xrange() loop: xrange does
    # not exist on Python 3 (NameError); range/enumerate behave the
    # same on both Python 2 and 3 here.
    for idx, expected in enumerate(driver_result):
        config = edit_config.mock_calls[idx][2]['config']
        self.assertIsNotNone(config, "mock_data is None")
        self.assertIsNotNone(re.search(expected, config),
                             "Expected result data not found")
def _process_replay(self, test1, test2, driver_results):
    """Tests create, replay, delete of two ports.

    Marks the switches used by test1/test2 as down, creates both
    ports, then runs the config monitor: only the down switches
    should be replayed, producing driver_results on the mock.
    """
    # Set all connection state to True except for
    # test case HOST_1, RP_NEXUS_IP_ADDRESS_1
    cfg_type = ['test_replay_unique1',
                'test_replay_duplvlan1',
                'test_replay_duplport1']
    for which_cfg in cfg_type:
        if which_cfg in [test1, test2]:
            state = False
        else:
            state = True
        port_cfg = TestCiscoNexusReplay.test_configs[which_cfg]
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            port_cfg.nexus_ip_addr, state)
    self._create_port(
        TestCiscoNexusReplay.test_configs[test1])
    self._create_port(
        TestCiscoNexusReplay.test_configs[test2])
    # Clean all the ncclient mock_calls so we can evaluate
    # content as a result of replay()
    self.mock_ncclient.reset_mock()
    # Since only this test case connection state is False,
    # it should be the only one replayed
    self._cfg_monitor.check_connections()
    self._verify_replay_results(driver_results)
    self._delete_port(
        TestCiscoNexusReplay.test_configs[test1])
    self._delete_port(
        TestCiscoNexusReplay.test_configs[test2])
def _config_side_effects(self, match_config, exc):
    """Generates config-dependent side effect for ncclient.

    This method was written to configure side_effects for both
    ncclient edit_config and get_config drivers. In the case
    of edit_config, the arguments target and config are passed
    into _side_effect_method. In the case of get, the argument
    filter is passed into _side_effect_method. For the sake of
    simplicity, the _side_effect_method was written to handle
    either case.
    """
    keywords = match_config.split()

    def _side_effect_method(target=None, config=None, filter=None):
        # get() passes `filter` as a (type, payload) pair; fall back
        # to its payload when no `config` was supplied.
        if config is None:
            config = filter[1]
        # Raise only when every keyword appears in the payload.
        if all(word in config for word in keywords):
            raise exc
    return _side_effect_method
def _create_port_failure(self, attr, match_str, test_case, test_id):
"""Verifies exception handling during initial create object.
This method is a shared method to initiate an exception
at various point of object creation. The points of failure
are identified by the caller which can be get operations or
edit operations. When the mechanism replay is functioning,
the exception should be suppressed and the switch is marked
as inactive.
attr: Which mock attribute to contain side_effect exception
match_str: String for side_effect method to match for exception
test_case: which configuration test case to run thru test
test_id: String to put in the exception.
"""
# Set switch state to active
switch_ip = TestCiscoNexusReplay.test_configs[test_case].nexus_ip_addr
self._cisco_mech_driver.set_switch_ip_and_active_state(
switch_ip, True)
# Set up driver exception
config = {attr:
self._config_side_effects(match_str,
Exception(test_id))}
self.mock_ncclient.configure_mock(**config)
self._create_port(TestCiscoNexusReplay.test_configs[test_case])
# _create_port should complete successfully but switch state changed
# to inactive.
self.assertFalse(
self._cisco_mech_driver.get_switch_ip_and_active_state(switch_ip))
def _delete_port_failure(self, attr, match_str, test_case, test_id):
    """Verify that delete-object failures deactivate the switch.

    A port is first created successfully, then a side effect matching
    *match_str* is installed on the mock attribute *attr* so the
    delete path raises.  With mechanism replay functioning, the
    exception must be suppressed and the switch marked inactive.

    attr:      mock attribute to receive the side_effect exception
    match_str: config substring that triggers the exception
    test_case: which configuration test case to run thru test
    test_id:   string to put in the exception
    """
    port_cfg = TestCiscoNexusReplay.test_configs[test_case]
    switch_ip = port_cfg.nexus_ip_addr

    # Start from a known-good (active) switch state.
    self._cisco_mech_driver.set_switch_ip_and_active_state(
        switch_ip, True)

    # Creation must succeed and leave the switch state untouched.
    self._create_port(port_cfg)
    self.assertTrue(
        self._cisco_mech_driver.get_switch_ip_and_active_state(switch_ip))

    # Arrange for the targeted driver operation to raise on delete.
    self.mock_ncclient.configure_mock(
        **{attr: self._config_side_effects(match_str,
                                           Exception(test_id))})

    self._delete_port(port_cfg)

    # _delete_port must complete successfully; the only observable
    # consequence of the failure is the switch going inactive.
    self.assertFalse(
        self._cisco_mech_driver.get_switch_ip_and_active_state(switch_ip))
def test_replay_unique_ports(self):
    """Provides replay data and result data for unique ports. """
    # These entries are regular expressions; raw strings keep the
    # backslashes intact for the re module and avoid Python's
    # invalid-escape-sequence warnings on patterns like '\<' and '\s'.
    unique_driver_result = [
        r'\<vlan\-name\>q\-265\<\/vlan\-name>',
        r'\<vstate\>active\<\/vstate>',
        r'\<no\>\s+\<shutdown\/\>\s+\<\/no\>',
        r'\<interface\>1\/10\<\/interface\>\s+'
        r'[\x20-\x7e]+\s+\<switchport\>\s+\<trunk\>\s+'
        r'\<allowed\>\s+\<vlan\>\s+\<vlan_id\>265',
        r'\<vlan\-name\>q\-267\<\/vlan\-name>',
        r'\<vstate\>active\<\/vstate>',
        r'\<no\>\s+\<shutdown\/\>\s+\<\/no\>',
        r'\<interface\>1\/10\<\/interface\>\s+'
        r'[\x20-\x7e]+\s+\<switchport\>\s+\<trunk\>\s+'
        r'\<allowed\>\s+\<vlan\>\s+\<vlan_id\>267',
    ]

    self._process_replay('test_replay_unique1',
                         'test_replay_unique2',
                         unique_driver_result)
def test_replay_duplicate_vlan(self):
    """Provides replay data and result data for duplicate vlans. """
    # These entries are regular expressions; raw strings keep the
    # backslashes intact for the re module and avoid Python's
    # invalid-escape-sequence warnings.
    duplicate_vlan_result = [
        r'\<vlan\-name\>q\-267\<\/vlan\-name>',
        r'\<vstate\>active\<\/vstate>',
        r'\<no\>\s+\<shutdown\/\>\s+\<\/no\>',
        r'\<interface\>1\/10\<\/interface\>\s+'
        r'[\x20-\x7e]+\s+\<switchport\>\s+\<trunk\>\s+'
        r'\<allowed\>\s+\<vlan\>\s+\<vlan_id\>267',
        r'\<interface\>1\/20\<\/interface\>\s+'
        r'[\x20-\x7e]+\s+\<switchport\>\s+\<trunk\>\s+'
        r'\<allowed\>\s+\<vlan\>\s+\<vlan_id\>267',
    ]

    self._process_replay('test_replay_duplvlan1',
                         'test_replay_duplvlan2',
                         duplicate_vlan_result)
def test_replay_duplicate_ports(self):
    """Provides replay data and result data for duplicate ports. """
    # These entries are regular expressions; raw strings keep the
    # backslashes intact for the re module and avoid Python's
    # invalid-escape-sequence warnings.
    duplicate_port_result = [
        r'\<vlan\-name\>q\-267\<\/vlan\-name>',
        r'\<vstate\>active\<\/vstate>',
        r'\<no\>\s+\<shutdown\/\>\s+\<\/no\>',
        r'\<interface\>1\/10\<\/interface\>\s+'
        r'[\x20-\x7e]+\s+\<switchport\>\s+\<trunk\>\s+'
        r'\<allowed\>\s+\<vlan\>\s+\<vlan_id\>267',
    ]

    self._process_replay('test_replay_duplport1',
                         'test_replay_duplport2',
                         duplicate_port_result)
def test_replay_get_interface_failure(self):
    """Verifies exception during ncclient get interface. """
    # Fail the 'get' driver call that fetches the running interface
    # config during create; the switch must end up marked inactive.
    self._create_port_failure(
        'connect.return_value.get.side_effect',
        'show running-config interface ethernet',
        'test_replay_unique1',
        __name__)
def test_replay_enable_vxlan_feature_failure(self):
    """Verifies exception during enable VXLAN feature driver. """
    # Set configuration variable to add/delete the VXLAN global nexus
    # switch values.
    cisco_config.cfg.CONF.set_override('vxlan_global_config', True,
                                       'ml2_cisco')
    # Fail the edit_config that enables the VXLAN feature; the switch
    # must end up marked inactive.
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'feature nv overlay vn-segment-vlan-based',
        'test_replay_vxlan_unique1',
        __name__)
def test_replay_disable_vxlan_feature_failure(self):
    """Verifies exception during disable VXLAN feature driver. """
    # Set configuration variable to add/delete the VXLAN global nexus
    # switch values.
    cisco_config.cfg.CONF.set_override('vxlan_global_config', True,
                                       'ml2_cisco')
    # Fail the edit_config that disables the VXLAN feature on delete;
    # the switch must end up marked inactive.
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'no feature nv overlay vn-segment-vlan-based',
        'test_replay_vxlan_unique1',
        __name__)
def test_replay_create_nve_member_failure(self):
    """Verifies exception during create nve member driver. """
    # Fail the edit_config that adds the NVE member VNI during create;
    # the switch must end up marked inactive.
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'member vni mcast-group',
        'test_replay_vxlan_unique1',
        __name__)
def test_replay_delete_nve_member_failure(self):
    """Verifies exception during delete nve member driver. """
    # Fail the edit_config that removes the NVE member VNI on delete;
    # the switch must end up marked inactive.
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'no member vni',
        'test_replay_vxlan_unique1',
        __name__)
def test_replay_create_vlan_failure(self):
    """Verifies exception during edit vlan create driver. """
    # Fail the edit_config that creates the vlan during create; the
    # switch must end up marked inactive.  (Also reused by other tests
    # below to push the switch into the inactive state.)
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'vlan-id-create-delete vlan-name',
        'test_replay_unique1',
        __name__)
def test_replay_delete_vlan_failure(self):
    """Verifies exception during edit vlan delete driver. """
    # Fail the edit_config that deletes vlan 267 on delete; the switch
    # must end up marked inactive.
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'vlan-id-create-delete no vlan 267',
        'test_replay_unique1',
        __name__)
def test_replay_create_trunk_failure(self):
    """Verifies exception during create trunk interface driver. """
    # Fail the edit_config that adds vlan 267 to the trunk during
    # create; the switch must end up marked inactive.
    self._create_port_failure(
        'connect.return_value.edit_config.side_effect',
        'switchport trunk allowed vlan_id 267',
        'test_replay_unique1',
        __name__)
def test_replay_delete_trunk_failure(self):
    """Verifies exception during delete trunk interface driver. """
    # Fail the edit_config that removes vlan 267 from the trunk on
    # delete; the switch must end up marked inactive.
    self._delete_port_failure(
        'connect.return_value.edit_config.side_effect',
        'switchport trunk allowed remove vlan 267',
        'test_replay_unique1',
        __name__)
def test_replay_get_nexus_type_failure(self):
    """Verifies exception during get nexus_type while replaying.

    When the 'show inventory' query fails during replay, the replay
    pass must abort quietly without sending any configuration.
    """
    # Set-up failed config which puts switch in inactive state
    self.test_replay_create_vlan_failure()
    # Clean all the ncclient mock_calls so we can evaluate
    # content as a result of replay()
    self.mock_ncclient.reset_mock()
    # Set-up so get_nexus_type driver fails
    config = {'connect.return_value.get.side_effect':
              self._config_side_effects('show inventory',
                                        Exception(__name__))}
    self.mock_ncclient.configure_mock(**config)
    # Perform replay which should not send back exception
    # but merely quit
    self._cfg_monitor.check_connections()
    # Since get of nexus_type failed, there should be
    # no attempt to configure anything.
    self._verify_replay_results([])
def test_replay_retry_handling(self):
    """Verifies a series of events to check retry_count operations.

    1) Verify retry count is incremented upon failure during replay.
    2) Verify further attempts to configure replay data stops.
    3) Verify upon receipt of new transaction that retry count
       is reset to 0 so replay attempts will restart.
    """
    # Raw strings: these are regex patterns fed to
    # _verify_replay_results.
    unique_driver_result1 = [
        r'\<vlan\-name\>q\-267\<\/vlan\-name>',
    ]
    # Four identical create-vlan attempts are expected below.
    unique_driver_result2 = [
        r'\<vlan\-name\>q\-267\<\/vlan\-name>',
    ] * 4
    config_replay = cisco_config.cfg.CONF.ml2_cisco.switch_replay_count

    # Set-up failed config which puts switch in inactive state
    self.test_replay_create_vlan_failure()

    # Make sure there is only a single attempt to configure.
    self._verify_replay_results(unique_driver_result1)

    # Don't reset_mock so create_vlan continues failing.
    # Perform replay (config_replay + 1) times to exceed the retry
    # count.  This should not roll-up an exception but merely quit.
    for _ in range(config_replay + 1):
        self._cfg_monitor.check_connections()

    # Verify switch retry count reached the configured max and only
    # (config_replay + 1) create_vlan attempts were sent: one from
    # test_replay_create_vlan_failure() and the rest from
    # check_connections().  Use unittest assertions rather than bare
    # assert (which is stripped under python -O and gives no detail).
    self.assertEqual(
        self._cisco_mech_driver.get_switch_retry_count(
            RP_NEXUS_IP_ADDRESS_1),
        config_replay + 1)
    self._verify_replay_results(unique_driver_result2)

    # Clean all the ncclient mock_calls to clear exception
    # and other mock_call history.
    self.mock_ncclient.reset_mock()

    # Verify there exists a single port binding.
    self.assertEqual(
        len(nexus_db_v2.get_nexusport_switch_bindings(
            RP_NEXUS_IP_ADDRESS_1)),
        1)

    # Send another config which should reset the retry count,
    # then verify replay results again.
    self._delete_port(
        TestCiscoNexusReplay.test_configs['test_replay_unique1'])

    # Verify port binding has been removed, switch retry count was
    # reset to 0, and no driver transactions have been sent.
    self.assertRaises(exceptions.NexusPortBindingNotFound,
                      nexus_db_v2.get_nexusport_switch_bindings,
                      RP_NEXUS_IP_ADDRESS_1)
    self.assertEqual(
        self._cisco_mech_driver.get_switch_retry_count(
            RP_NEXUS_IP_ADDRESS_1),
        0)
    self._verify_replay_results([])
| |
"""IO with fif files containing events
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Teon Brooks <teon.brooks@gmail.com>
# Clement Moutard <clement.moutard@polytechnique.org>
#
# License: BSD (3-clause)
import numpy as np
from os.path import splitext
from .utils import check_fname, logger, verbose, _get_stim_channel
from .io.constants import FIFF
from .io.tree import dir_tree_find
from .io.tag import read_tag
from .io.open import fiff_open
from .io.write import write_int, start_block, start_file, end_block, end_file
from .io.pick import pick_channels
def pick_events(events, include=None, exclude=None, step=False):
    """Select some events.

    Parameters
    ----------
    events : ndarray
        Array as returned by mne.find_events.
    include : int | list | None
        A event id to include or a list of them.
        If None all events are included.
    exclude : int | list | None
        A event id to exclude or a list of them.
        If None no event is excluded. If include is not None
        the exclude parameter is ignored.
    step : bool
        If True (default is False), events have a step format according
        to the argument output='step' in the function find_events().
        In this case, the two last columns are considered in inclusion/
        exclusion criteria.

    Returns
    -------
    events : array, shape (n_events, 3)
        The list of events
    """
    if include is not None:
        if not isinstance(include, list):
            include = [include]
        # Build a keep-mask over rows.  NOTE: np.bool was deprecated and
        # removed from numpy; the builtin bool dtype is the equivalent.
        mask = np.zeros(len(events), dtype=bool)
        for e in include:
            mask = np.logical_or(mask, events[:, 2] == e)
            if step:
                mask = np.logical_or(mask, events[:, 1] == e)
        events = events[mask]
    elif exclude is not None:
        if not isinstance(exclude, list):
            exclude = [exclude]
        mask = np.ones(len(events), dtype=bool)
        for e in exclude:
            mask = np.logical_and(mask, events[:, 2] != e)
            if step:
                mask = np.logical_and(mask, events[:, 1] != e)
        events = events[mask]
    else:
        events = np.copy(events)
    if len(events) == 0:
        raise RuntimeError("No events found")
    return events
def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax,
                         new_id=None, fill_na=None):
    """Define new events by co-occurrence of existing events.

    This function can be used to evaluate events depending on the
    temporal lag to another event. For example, this can be used to
    analyze evoked responses which were followed by a button press within
    a defined time window.

    Parameters
    ----------
    events : ndarray
        Array as returned by mne.find_events.
    reference_id : int
        The reference event. The event defining the epoch of interest.
    target_id : int
        The target event. The event co-occurring in within a certain time
        window around the reference event.
    sfreq : float
        The sampling frequency of the data.
    tmin : float
        The lower limit in seconds from the target event.
    tmax : float
        The upper limit border in seconds from the target event.
    new_id : int
        new_id for the new event
    fill_na : int | None
        Fill event to be inserted if target is not available within the time
        window specified. If None, the 'null' events will be dropped.

    Returns
    -------
    new_events : ndarray
        The new defined events
    lag : ndarray
        time lag between reference and target in milliseconds.
    """
    if new_id is None:
        new_id = reference_id

    # Milliseconds per sample, used to convert sample lags to ms below.
    tsample = 1e3 / sfreq
    imin = int(tmin * sfreq)  # window start, in samples
    imax = int(tmax * sfreq)  # window end, in samples

    new_events = []
    lag = []
    for event in events.copy().astype('f8'):
        if event[2] == reference_id:
            lower = event[0] + imin
            upper = event[0] + imax
            # Targets strictly inside the (lower, upper) sample window.
            res = events[(events[:, 0] > lower) &
                         (events[:, 0] < upper) & (events[:, 2] == target_id)]
            if res.any():
                # Target found: keep the reference event under the new
                # id and record the lag (still in samples here).
                lag += [event[0] - res[0][0]]
                event[2] = new_id
                new_events += [event]
            elif fill_na is not None:
                # No target found: optionally keep a placeholder event
                # and flag its lag with the fill_na value.
                event[2] = fill_na
                new_events += [event]
                lag += [fill_na]

    new_events = np.array(new_events)

    # Convert sample lags to milliseconds, leaving fill_na placeholders
    # untouched.  NOTE(review): if fill_na happens to equal a real lag
    # value that lag is skipped too -- presumably fill_na is chosen to
    # avoid such collisions; confirm with callers.
    lag = np.abs(lag, dtype='f8')
    if lag.any():
        lag[lag != fill_na] *= tsample
    else:
        lag = np.array([])

    return new_events if new_events.any() else np.array([]), lag
def _read_events_fif(fid, tree):
    """Aux function: read events (and optional id mappings) from a FIF tree."""
    # Find the desired block
    events = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
    if len(events) == 0:
        fid.close()
        raise ValueError('Could not find event data')

    events = events[0]

    # Pull the flat event list out of the block's directory entries.
    for d in events['directory']:
        kind = d.kind
        pos = d.pos
        if kind == FIFF.FIFF_MNE_EVENT_LIST:
            tag = read_tag(fid, pos)
            event_list = tag.data
            break
    else:
        # for/else: no FIFF_MNE_EVENT_LIST tag was found in the block.
        raise ValueError('Could not find any events')

    # An optional FIFF_DESCRIPTION tag carries "name:id" pairs
    # separated by ';'.
    mappings = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
    mappings = mappings[0]

    for d in mappings['directory']:
        kind = d.kind
        pos = d.pos
        if kind == FIFF.FIFF_DESCRIPTION:
            tag = read_tag(fid, pos)
            mappings = tag.data
            break
    else:
        mappings = None

    if mappings is not None:  # deal with ':' in keys
        # Split each pair on its *last* ':' (reverse the string, split
        # once, then un-reverse the parts) so event names may themselves
        # contain ':' characters.
        m_ = [[s[::-1] for s in m[::-1].split(':', 1)]
              for m in mappings.split(';')]
        mappings = dict((k, int(v)) for v, k in m_)

    # The list is stored flat; reshape to (n_events, 3).
    event_list = event_list.reshape(len(event_list) // 3, 3)
    return event_list, mappings
def read_events(filename, include=None, exclude=None, mask=0):
    """Reads events from fif or text file.

    Parameters
    ----------
    filename : string
        Name of the input file.
        If the extension is .fif, events are read assuming
        the file is in FIF format, otherwise (e.g., .eve,
        .lst, .txt) events are read as coming from text.
        Note that new format event files do not contain
        the "time" column (used to be the second column).
    include : int | list | None
        A event id to include or a list of them.
        If None all events are included.
    exclude : int | list | None
        A event id to exclude or a list of them.
        If None no event is excluded. If include is not None
        the exclude parameter is ignored.
    mask : int
        The value of the digital mask to apply to the stim channel values.
        The default value is 0.

    Returns
    -------
    events: array, shape (n_events, 3)
        The list of events

    Notes
    -----
    This function will discard the offset line (i.e., first line with zero
    event number) if it is present in a text file.

    Working with downsampled data: Events that were computed before the data
    was decimated are no longer valid. Please recompute your events after
    decimation.
    """
    check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
                                     '-eve.lst', '-eve.txt'))

    ext = splitext(filename)[1].lower()
    if ext == '.fif' or ext == '.gz':
        # Binary FIF path: open, extract the event list, always close.
        fid, tree, _ = fiff_open(filename)
        try:
            event_list, _ = _read_events_fif(fid, tree)
        finally:
            fid.close()
    else:
        # Have to read this in as float64 then convert because old style
        # eve/lst files had a second float column that will raise errors
        lines = np.loadtxt(filename, dtype=np.float64).astype(np.uint32)
        if len(lines) == 0:
            raise ValueError('No text lines found')

        if lines.ndim == 1:  # Special case for only one event
            lines = lines[np.newaxis, :]

        if len(lines[0]) == 4:  # Old format eve/lst
            goods = [0, 2, 3]  # Omit "time" variable
        elif len(lines[0]) == 3:
            goods = [0, 1, 2]
        else:
            raise ValueError('Unknown number of columns in event text file')

        event_list = lines[:, goods]
        # Discard the leading offset line (first row with event id 0).
        if event_list.shape[0] > 0 and event_list[0, 2] == 0:
            event_list = event_list[1:]

    # Apply id filtering and the digital mask before returning.
    event_list = pick_events(event_list, include, exclude)
    event_list = _mask_trigs(event_list, mask)
    return event_list
def write_events(filename, event_list):
    """Write events to file.

    Parameters
    ----------
    filename : string
        Name of the output file.
        If the extension is .fif, events are written in
        binary FIF format, otherwise (e.g., .eve, .lst,
        .txt) events are written as plain text.
        Note that new format event files do not contain
        the "time" column (used to be the second column).
    event_list : array, shape (n_events, 3)
        The list of events
    """
    check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
                                     '-eve.lst', '-eve.txt'))

    ext = splitext(filename)[1].lower()
    if ext == '.fif' or ext == '.gz':
        # Start writing...
        fid = start_file(filename)
        start_block(fid, FIFF.FIFFB_MNE_EVENTS)
        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, event_list.T)
        end_block(fid, FIFF.FIFFB_MNE_EVENTS)
        end_file(fid)
    else:
        # Use a context manager so the handle is closed even if a row
        # fails to format (the original leaked the file on error).
        with open(filename, 'w') as f:
            for e in event_list:
                f.write('%6d %6d %3d\n' % tuple(e))
def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0):
    # Detect value changes ("steps") in a stim channel array and return
    # them as rows of [sample, value_before, value_after].  Only samples
    # where *all* rows of `data` change simultaneously are kept.
    changed = np.diff(data, axis=1) != 0
    idx = np.where(np.all(changed, axis=0))[0]
    if len(idx) == 0:
        # No transitions at all.
        return np.empty((0, 3), dtype='int32')

    pre_step = data[0, idx]  # value just before each change
    idx += 1
    post_step = data[0, idx]  # value just after each change
    idx += first_samp  # convert to absolute sample numbers
    steps = np.c_[idx, pre_step, post_step]

    if pad_start is not None:
        # Synthesize a leading step if the data does not start at the
        # assumed pad value.
        v = steps[0, 1]
        if v != pad_start:
            steps = np.insert(steps, 0, [0, pad_start, v], axis=0)

    if pad_stop is not None:
        # Likewise synthesize a trailing step back to the pad value.
        v = steps[-1, 2]
        if v != pad_stop:
            last_idx = len(data[0]) + first_samp
            steps = np.append(steps, [[last_idx, v, pad_stop]], axis=0)

    if merge != 0:
        # Merge steps closer together than abs(merge) samples; the sign
        # of `merge` selects which of the pair survives.
        diff = np.diff(steps[:, 0])
        idx = (diff <= abs(merge))
        if np.any(idx):
            where = np.where(idx)[0]
            keep = np.logical_not(idx)
            if merge > 0:
                # drop the earlier event
                steps[where + 1, 1] = steps[where, 1]
                keep = np.append(keep, True)
            else:
                # drop the later event
                steps[where, 2] = steps[where + 1, 2]
                keep = np.insert(keep, 0, True)

            # After merging, discard entries whose before/after values
            # became equal (no longer a real step).
            is_step = (steps[:, 1] != steps[:, 2])
            keep = np.logical_and(keep, is_step)
            steps = steps[keep]

    return steps
def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
                    stim_channel=None):
    """Find all steps in data from a stim channel.

    Parameters
    ----------
    raw : Raw object
        The raw data.
    pad_start : None | int
        Values to assume outside of the stim channel (e.g., if pad_start=0 and
        the stim channel starts with value 5, an event of [0, 0, 5] will be
        inserted at the beginning). With None, no steps will be inserted.
    pad_stop : None | int
        Values to assume outside of the stim channel, see ``pad_start``.
    merge : int
        Merge steps occurring in neighboring samples. The integer value
        indicates over how many samples events should be merged, and the sign
        indicates in which direction they should be merged (negative means
        towards the earlier event, positive towards the later event).
    stim_channel : None | string | list of string
        Name of the stim channel or all the stim channels
        affected by the trigger. If None, the config variables
        'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
        etc. are read. If these are not found, it will default to
        'STI 014'.

    Returns
    -------
    steps : array, shape = (n_samples, 3)
        For each step in the stim channel the values [sample, v_from, v_to].
        The first column contains the event time in samples (the first sample
        with the new value). The second column contains the stim channel value
        before the step, and the third column contains value after the step.

    See Also
    --------
    find_events : More sophisticated options for finding events in a Raw file.
    """
    # pull stim channel from config if necessary
    stim_channel = _get_stim_channel(stim_channel)

    picks = pick_channels(raw.info['ch_names'], include=stim_channel)
    if len(picks) == 0:
        raise ValueError('No stim channel found to extract event triggers.')
    data, _ = raw[picks, :]
    if np.any(data < 0):
        logger.warning('Trigger channel contains negative values. '
                       'Taking absolute value.')
        data = np.abs(data)  # make sure trig channel is positive
    # np.int was deprecated and removed from numpy; the builtin int is
    # the equivalent dtype here.
    data = data.astype(int)

    return _find_stim_steps(data, raw.first_samp, pad_start=pad_start,
                            pad_stop=pad_stop, merge=merge)
@verbose
def _find_events(data, first_samp, verbose=None, output='onset',
                 consecutive='increasing', min_samples=0, mask=0):
    """Helper function for find_events.

    ``data`` is the stim-channel array and ``first_samp`` the absolute
    index of its first sample; the remaining parameters mirror
    ``find_events``.
    """
    if min_samples > 0:
        # Steps closer together than min_samples get merged; subtract
        # one when min_samples is a whole number so events of exactly
        # that length survive.
        merge = int(min_samples // 1)
        if merge == min_samples:
            merge -= 1
    else:
        merge = 0

    if np.any(data < 0):
        logger.warning('Trigger channel contains negative values. '
                       'Taking absolute value.')
        data = np.abs(data)  # make sure trig channel is positive
    # np.int was deprecated and removed from numpy; the builtin int is
    # the equivalent dtype here.
    data = data.astype(int)

    events = _find_stim_steps(data, first_samp, pad_stop=0, merge=merge)
    events = _mask_trigs(events, mask)

    # Determine event onsets and offsets
    if consecutive == 'increasing':
        onsets = (events[:, 2] > events[:, 1])
        offsets = np.logical_and(np.logical_or(onsets, (events[:, 2] == 0)),
                                 (events[:, 1] > 0))
    elif consecutive:
        onsets = (events[:, 2] > 0)
        offsets = (events[:, 1] > 0)
    else:
        onsets = (events[:, 1] == 0)
        offsets = (events[:, 2] == 0)

    onset_idx = np.where(onsets)[0]
    offset_idx = np.where(offsets)[0]

    if len(onset_idx) == 0 or len(offset_idx) == 0:
        return np.empty((0, 3), dtype='int32')

    # delete orphaned onsets/offsets
    if onset_idx[0] > offset_idx[0]:
        logger.info("Removing orphaned offset at the beginning of the file.")
        offset_idx = np.delete(offset_idx, 0)

    if onset_idx[-1] > offset_idx[-1]:
        logger.info("Removing orphaned onset at the end of the file.")
        onset_idx = np.delete(onset_idx, -1)

    if output == 'onset':
        events = events[onset_idx]
    elif output == 'step':
        idx = np.union1d(onset_idx, offset_idx)
        events = events[idx]
    elif output == 'offset':
        event_id = events[onset_idx, 2]
        events = events[offset_idx]
        events[:, 1] = events[:, 2]
        events[:, 2] = event_id
        # Report the last sample of the event, not the sample after it.
        events[:, 0] -= 1
    else:
        raise Exception("Invalid output parameter %r" % output)

    logger.info("%s events found" % len(events))
    logger.info("Events id: %s" % np.unique(events[:, 2]))
    return events
@verbose
def find_events(raw, stim_channel=None, verbose=None, output='onset',
                consecutive='increasing', min_duration=0,
                shortest_event=2, mask=0):
    """Find events from raw file.

    Parameters
    ----------
    raw : Raw object
        The raw data.
    stim_channel : None | string | list of string
        Name of the stim channel or all the stim channels
        affected by the trigger. If None, the config variables
        'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
        etc. are read. If these are not found, it will default to
        'STI 014'.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    output : 'onset' | 'offset' | 'step'
        Whether to report when events start, when events end, or both.
    consecutive : bool | 'increasing'
        If True, consider instances where the value of the events
        channel changes without first returning to zero as multiple
        events. If False, report only instances where the value of the
        events channel changes from/to zero. If 'increasing', report
        adjacent events only when the second event code is greater than
        the first.
    min_duration : float
        The minimum duration of a change in the events channel required
        to consider it as an event (in seconds).
    shortest_event : int
        Minimum number of samples an event must last (default is 2). If the
        duration is less than this an exception will be raised.
    mask : int
        The value of the digital mask to apply to the stim channel values.
        The default value is 0.

    Returns
    -------
    events : array, shape = (n_events, 3)
        All events that were found. The first column contains the event time
        in samples and the third column contains the event id. For output =
        'onset' or 'step', the second column contains the value of the stim
        channel immediately before the the event/step. For output = 'offset',
        the second column contains the value of the stim channel after the
        event offset.

    Examples
    --------
    Consider data with a stim channel that looks like: [0, 32, 32, 33, 32, 0]

    By default, find_events returns all samples at which the value of the
    stim channel increases::

        >>> print(find_events(raw)) # doctest: +SKIP
        [[ 1  0 32]
         [ 3 32 33]]

    If consecutive is False, find_events only returns the samples at which
    the stim channel changes from zero to a non-zero value::

        >>> print(find_events(raw, consecutive=False)) # doctest: +SKIP
        [[ 1  0 32]]

    If consecutive is True, find_events returns samples at which the
    event changes, regardless of whether it first returns to zero::

        >>> print(find_events(raw, consecutive=True)) # doctest: +SKIP
        [[ 1  0 32]
         [ 3 32 33]
         [ 4 33 32]]

    If output is 'offset', find_events returns the last sample of each event
    instead of the first one::

        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
        ...                   output='offset'))
        [[ 2 33 32]
         [ 3 32 33]
         [ 4  0 32]]

    If output is 'step', find_events returns the samples at which an event
    starts or ends::

        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
        ...                   output='step'))
        [[ 1  0 32]
         [ 3 32 33]
         [ 4 33 32]
         [ 5 32  0]]

    To ignore spurious events, it is also possible to specify a minimum
    event duration. Assuming our events channel has a sample rate of
    1000 Hz::

        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
        ...                   min_duration=0.002))
        [[ 1  0 32]]

    For the digital mask, it will take the binary representation of the
    digital mask, e.g. 5 -> '00000101', and will block the values
    where mask is one, e.g.::

              7 '0000111' <- trigger value
             37 '0100101' <- mask
         ----------------
              2 '0000010'

    See Also
    --------
    find_stim_steps : Find all the steps in the stim channel.
    """
    # Convert the minimum duration from seconds to samples.
    min_samples = min_duration * raw.info['sfreq']

    # pull stim channel from config if necessary
    stim_channel = _get_stim_channel(stim_channel)

    pick = pick_channels(raw.info['ch_names'], include=stim_channel)
    if len(pick) == 0:
        raise ValueError('No stim channel found to extract event triggers.')
    data, _ = raw[pick, :]

    events = _find_events(data, raw.first_samp, verbose=verbose, output=output,
                          consecutive=consecutive, min_samples=min_samples,
                          mask=mask)

    # add safety check for spurious events (for ex. from neuromag syst.) by
    # checking the number of low sample events
    n_short_events = np.sum(np.diff(events[:, 0]) < shortest_event)
    if n_short_events > 0:
        raise ValueError("You have %i events shorter than the "
                         "shortest_event. These are very unusual and you "
                         "may want to set min_duration to a larger value e.g."
                         " x / raw.info['sfreq']. Where x = 1 sample shorter "
                         "than the shortest event length." % (n_short_events))

    return events
def _mask_trigs(events, mask):
"""Helper function for masking digital trigger values"""
if not isinstance(mask, int):
raise TypeError('You provided a(n) %s. Mask must be an int.'
% type(mask))
n_events = len(events)
if n_events == 0:
return events.copy()
mask = np.bitwise_not(mask)
events[:, 1:] = np.bitwise_and(events[:, 1:], mask)
events = events[events[:, 1] != events[:, 2]]
return events
def merge_events(events, ids, new_id, replace_events=True):
    """Merge a set of events.

    Parameters
    ----------
    events : array
        Events.
    ids : array of int
        The ids of events to merge.
    new_id : int
        The new id.
    replace_events : bool
        If True (default), old event ids are replaced. Otherwise,
        new events will be added to the old event list.

    Returns
    -------
    new_events: array
        The new events
    """
    events_out = events.copy()
    # Replace every occurrence of the given ids in both value columns.
    # (The original looped over ids, rebinding its mask each pass; a
    # single membership test per column is equivalent and removes the
    # dead `where.fill(False)` initialization.)
    for col in (1, 2):
        where = np.in1d(events[:, col], ids)
        events_out[where, col] = new_id
    if not replace_events:
        # Keep the originals too, sorted back into time order.
        events_out = np.concatenate((events_out, events), axis=0)
        events_out = events_out[np.argsort(events_out[:, 0])]
    return events_out
def shift_time_events(events, ids, tshift, sfreq):
    """Shift the sample times of selected events.

    Parameters
    ----------
    events : array, shape=(n_events, 3)
        The events
    ids : array int
        The ids of events to shift.
    tshift : float
        Time-shift event. Use positive value tshift for forward shifting
        the event and negative value for backward shift.
    sfreq : float
        The sampling frequency of the data.

    Returns
    -------
    new_events : array
        The new events.
    """
    shifted = events.copy()
    offset = int(tshift * sfreq)  # shift expressed in samples
    for event_id in ids:
        shifted[shifted[:, 2] == event_id, 0] += offset
    return shifted
def make_fixed_length_events(raw, id, start=0, stop=None, duration=1.):
    """Make a set of events separated by a fixed duration.

    Parameters
    ----------
    raw : instance of Raw
        A raw object to use the data from.
    id : int
        The id to use.
    start : float
        Time of first event.
    stop : float | None
        Maximum time of last event. If None, events extend to the end
        of the recording.
    duration : float
        The duration to separate events by.

    Returns
    -------
    new_events : array
        The new events.
    """
    first = raw.time_as_index(start)[0] + raw.first_samp
    if stop is None:
        last = raw.last_samp + 1
    else:
        last = min(raw.time_as_index(stop)[0] + raw.first_samp,
                   raw.last_samp + 1)
    if not isinstance(id, int):
        raise ValueError('id must be an integer')
    # Make sure we don't go out the end of the file:
    last -= int(np.ceil(raw.info['sfreq'] * duration))
    samples = np.arange(first, last,
                        raw.info['sfreq'] * duration).astype(int)
    n = len(samples)
    return np.c_[samples,
                 np.zeros(n, dtype=int),
                 id * np.ones(n, dtype=int)]
def concatenate_events(events, first_samps, last_samps):
    """Concatenate event lists in a manner compatible with
    concatenate_raws.

    This is useful, for example, if you processed and/or changed
    events in raw files separately before combining them using
    concatenate_raws.

    Parameters
    ----------
    events : list of arrays
        List of event arrays, typically each extracted from a
        corresponding raw file that is being concatenated.
    first_samps : list or array of int
        First sample numbers of the raw files concatenated.
    last_samps : list or array of int
        Last sample numbers of the raw files concatenated.

    Returns
    -------
    events : array
        The concatenated events.
    """
    if not isinstance(events, list):
        raise ValueError('events must be a list of arrays')
    if not (len(events) == len(last_samps) and
            len(events) == len(first_samps)):
        raise ValueError('events, first_samps, and last_samps must all have '
                         'the same lengths')
    first_samps = np.array(first_samps)
    last_samps = np.array(last_samps)
    # Cumulative number of samples contributed by each raw file.
    n_samps = np.cumsum(last_samps - first_samps + 1)

    out = events[0]
    for block, first, offset in zip(events[1:], first_samps[1:],
                                    n_samps[:-1]):
        shifted = block.copy()
        # Remove this file's own skip, then shift by the total length of
        # all preceding files plus the first file's original offset.
        shifted[:, 0] += offset + first_samps[0] - first
        out = np.concatenate((out, shifted), axis=0)
    return out
| |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.carrier import error
class TimerCallable(object):
    """Wrap a timer callback so it fires at most once per *callInterval*.

    Comparison operators delegate to the wrapped callback, so a wrapper
    can be located in (and removed from) a plain list by passing the
    bare callback object.
    """

    def __init__(self, cbFun, callInterval):
        self.__cbFun = cbFun
        self.__callInterval = callInterval
        self.__nextCall = 0  # earliest time the callback may fire again

    def __call__(self, timeNow):
        # Fire only once the scheduled moment has been reached, then
        # push the next allowed call one interval into the future.
        if timeNow < self.__nextCall:
            return
        self.__cbFun(timeNow)
        self.__nextCall = timeNow + self.__callInterval

    def __eq__(self, cbFun):
        return self.__cbFun == cbFun

    def __ne__(self, cbFun):
        return self.__cbFun != cbFun

    def __lt__(self, cbFun):
        return self.__cbFun < cbFun

    def __le__(self, cbFun):
        return self.__cbFun <= cbFun

    def __gt__(self, cbFun):
        return self.__cbFun > cbFun

    def __ge__(self, cbFun):
        return self.__cbFun >= cbFun
class AbstractTransportDispatcher(object):
    def __init__(self):
        self.__transports = {}  # transportDomain -> transport object
        self.__transportDomainMap = {}  # transport object -> transportDomain
        # NOTE(review): job bookkeeping -- managed by methods outside
        # this view.
        self.__jobs = {}
        self.__recvCallables = {}  # recvId -> receive callback
        self.__timerCallables = []  # TimerCallable wrappers
        self.__ticks = 0
        self.__timerResolution = 0.1  # seconds between timer passes
        self.__timerDelta = self.__timerResolution * 0.05  # scheduling slack
        self.__nextTime = 0
        self.__routingCbFun = None  # optional message-routing hook
def _cbFun(self, incomingTransport, transportAddress, incomingMessage):
if incomingTransport in self.__transportDomainMap:
transportDomain = self.__transportDomainMap[incomingTransport]
else:
raise error.CarrierError(
'Unregistered transport %s' % (incomingTransport,)
)
if self.__routingCbFun:
recvId = self.__routingCbFun(
transportDomain, transportAddress, incomingMessage
)
else:
recvId = None
if recvId in self.__recvCallables:
self.__recvCallables[recvId](
self, transportDomain, transportAddress, incomingMessage
)
else:
raise error.CarrierError(
'No callback for "%r" found - loosing incoming event' % (recvId,)
)
# Dispatcher API
def registerRoutingCbFun(self, routingCbFun):
if self.__routingCbFun:
raise error.CarrierError(
'Data routing callback already registered'
)
self.__routingCbFun = routingCbFun
    def unregisterRoutingCbFun(self):
        # Drop the routing hook if one is installed; harmless otherwise.
        if self.__routingCbFun:
            self.__routingCbFun = None
def registerRecvCbFun(self, recvCb, recvId=None):
if recvId in self.__recvCallables:
raise error.CarrierError(
'Receive callback %r already registered' % (recvId is None and '<default>' or recvId,)
)
self.__recvCallables[recvId] = recvCb
def unregisterRecvCbFun(self, recvId=None):
if recvId in self.__recvCallables:
del self.__recvCallables[recvId]
def registerTimerCbFun(self, timerCbFun, tickInterval=None):
if not tickInterval:
tickInterval = self.__timerResolution
self.__timerCallables.append(TimerCallable(timerCbFun, tickInterval))
def unregisterTimerCbFun(self, timerCbFun=None):
if timerCbFun:
self.__timerCallables.remove(timerCbFun)
else:
self.__timerCallables = []
def registerTransport(self, transportDomain, transport):
if transportDomain in self.__transports:
raise error.CarrierError(
'Transport %s already registered' % (transportDomain,)
)
transport.registerCbFun(self._cbFun)
self.__transports[transportDomain] = transport
self.__transportDomainMap[transport] = transportDomain
def unregisterTransport(self, transportDomain):
if transportDomain not in self.__transports:
raise error.CarrierError(
'Transport %s not registered' % (transportDomain,)
)
self.__transports[transportDomain].unregisterCbFun()
del self.__transportDomainMap[self.__transports[transportDomain]]
del self.__transports[transportDomain]
def getTransport(self, transportDomain):
if transportDomain in self.__transports:
return self.__transports[transportDomain]
raise error.CarrierError(
'Transport %s not registered' % (transportDomain,))
def sendMessage(self, outgoingMessage, transportDomain,
transportAddress):
if transportDomain in self.__transports:
self.__transports[transportDomain].sendMessage(
outgoingMessage, transportAddress
)
else:
raise error.CarrierError('No suitable transport domain for '
'%s' % (transportDomain,))
def getTimerResolution(self):
return self.__timerResolution
def setTimerResolution(self, timerResolution):
if timerResolution < 0.01 or timerResolution > 10:
raise error.CarrierError('Impossible timer resolution')
self.__timerResolution = timerResolution
self.__timerDelta = timerResolution * 0.05
def getTimerTicks(self):
return self.__ticks
def handleTimerTick(self, timeNow):
if self.__nextTime == 0: # initial initialization
self.__nextTime = timeNow + self.__timerResolution - self.__timerDelta
if self.__nextTime >= timeNow:
return
self.__ticks += 1
self.__nextTime = timeNow + self.__timerResolution - self.__timerDelta
for timerCallable in self.__timerCallables:
timerCallable(timeNow)
def jobStarted(self, jobId, count=1):
if jobId in self.__jobs:
self.__jobs[jobId] += count
else:
self.__jobs[jobId] = count
def jobFinished(self, jobId, count=1):
self.__jobs[jobId] -= count
if self.__jobs[jobId] == 0:
del self.__jobs[jobId]
def jobsArePending(self):
return bool(self.__jobs)
def runDispatcher(self, timeout=0.0):
raise error.CarrierError('Method not implemented')
def closeDispatcher(self):
for tDomain in list(self.__transports):
self.__transports[tDomain].closeTransport()
self.unregisterTransport(tDomain)
self.__transports.clear()
self.unregisterRecvCbFun()
self.unregisterTimerCbFun()
class AbstractTransportAddress(object):
    """Base class for transport endpoint addresses.

    Carries an optional local (source) address alongside whatever
    destination information concrete subclasses hold.
    """
    _localAddress = None

    def setLocalAddress(self, s):
        """Record the local endpoint address; returns self for chaining."""
        self._localAddress = s
        return self

    def getLocalAddress(self):
        """Return the previously recorded local endpoint address (or None)."""
        return self._localAddress

    def clone(self, localAddress=None):
        """Return a copy of this address, optionally with a new local address."""
        chosen = localAddress
        if chosen is None:
            # Fall back to this instance's local address; a falsy stored
            # address deliberately yields None, matching prior behavior.
            chosen = self.getLocalAddress() or localAddress
        return self.__class__(self).setLocalAddress(chosen)
class AbstractTransport(object):
    """Base class for network transports.

    A transport delivers incoming messages to a single registered callback
    and sends outgoing messages on request; concrete subclasses supply the
    actual client/server I/O.
    """
    PROTO_TRANSPORT_DISPATCHER = None
    ADDRESS_TYPE = AbstractTransportAddress
    _cbFun = None

    @classmethod
    def isCompatibleWithDispatcher(cls, transportDispatcher):
        """Return True if *transportDispatcher* can drive this transport."""
        return isinstance(transportDispatcher, cls.PROTO_TRANSPORT_DISPATCHER)

    def registerCbFun(self, cbFun):
        """Attach the single receive callback; error if one is present."""
        if self._cbFun:
            raise error.CarrierError(
                'Callback function %s already registered '
                'at %s' % (self._cbFun, self))
        self._cbFun = cbFun

    def unregisterCbFun(self):
        """Detach the receive callback."""
        self._cbFun = None

    def closeTransport(self):
        """Shut the transport down; base behavior just drops the callback."""
        self.unregisterCbFun()

    # Public API

    def openClientMode(self, iface=None):
        """Open the transport in client mode; must be overridden."""
        raise error.CarrierError('Method not implemented')

    def openServerMode(self, iface):
        """Open the transport bound to *iface* as a server; must be overridden."""
        raise error.CarrierError('Method not implemented')

    def sendMessage(self, outgoingMessage, transportAddress):
        """Send *outgoingMessage* to *transportAddress*; must be overridden."""
        raise error.CarrierError('Method not implemented')
| |
# -*- coding: utf-8 -*-
import datetime
import calendar
import operator
from math import copysign
from six import integer_types
from warnings import warn
from ._common import weekday
# Weekday singletons MO..SU, indexed 0..6 to match datetime.weekday()
# numbering (Monday == 0); `weekdays` keeps them addressable by index.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class relativedelta(object):
    """
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his
    `mx.DateTime <http://www.egenix.com/files/python/mxDateTime.html>`_ extension.
    However, notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes::
        relativedelta(datetime1, datetime2)
    The second one is passing it any number of the following keyword arguments::
        relativedelta(arg1=x,arg2=y,arg3=z...)
        year, month, day, hour, minute, second, microsecond:
            Absolute information (argument is singular); adding or subtracting a
            relativedelta with absolute information does not perform an arithmetic
            operation, but rather REPLACES the corresponding value in the
            original datetime with the value(s) in relativedelta.
        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative (argument is plural); adding
            or subtracting a relativedelta with relative information performs
            the corresponding arithmetic operation on the original datetime value
            with the information in the relativedelta.
        weekday:
            One of the weekday instances (MO, TU, etc). These instances may
            receive a parameter N, specifying the Nth weekday, which could
            be positive or negative (like MO(+1) or MO(-2)). Not specifying
            it is the same as specifying +1. You can also use an integer,
            where 0=MO.
        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.
        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.
    Here is the behavior of operations with relativedelta:
    1. Calculate the absolute year, using the 'year' argument, or the
       original datetime year, if the argument is not present.
    2. Add the relative 'years' argument to the absolute year.
    3. Do steps 1 and 2 for month/months.
    4. Calculate the absolute day, using the 'day' argument, or the
       original datetime day, if the argument is not present. Then,
       subtract from the day until it fits in the year and month
       found after their operations.
    5. Add the relative 'days' argument to the absolute day. Notice
       that the 'weeks' argument is multiplied by 7 and added to
       'days'.
    6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
       microsecond/microseconds.
    7. If the 'weekday' argument is present, calculate the weekday,
       with the given (wday, nth) tuple. wday is the index of the
       weekday (0-6, 0=Mon), and nth is the number of weeks to add
       forward or backward, depending on its signal. Notice that if
       the calculated date is already Monday, for example, using
       (0, 1) or (0, -1) won't change the day.
    """
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        # Check for non-integer values in integer-only quantities
        if any(x is not None and x != int(x) for x in (years, months)):
            raise ValueError("Non-integer years and months are "
                             "ambiguous and not currently supported.")
        if dt1 and dt2:
            # Two-datetime form: compute the relativedelta between dt1 and dt2.
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0
            # Get year / month delta between the two
            months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
            self._set_months(months)
            # Remove the year/month delta so the timedelta is just well-defined
            # time units (seconds, days and microseconds)
            dtm = self.__radd__(dt2)
            # If we've overshot our target, make an adjustment
            if dt1 < dt2:
                compare = operator.gt
                increment = 1
            else:
                compare = operator.lt
                increment = -1
            # Walk the month count back one step at a time until dt2 + months
            # no longer overshoots dt1.
            while compare(dt1, dtm):
                months += increment
                self._set_months(months)
                dtm = self.__radd__(dt2)
            # Get the timedelta between the "months-adjusted" date and dt1
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            # Keyword form: store the supplied components directly.
            # Relative information
            self.years = years
            self.months = months
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            # Absolute information
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond
            if any(x is not None and int(x) != x
                   for x in (year, month, day, hour,
                             minute, second, microsecond)):
                # For now we'll deprecate floats - later it'll be an error.
                warn("Non-integer value passed as absolute information. " +
                     "This is not a well-defined condition and will raise " +
                     "errors in future versions.", DeprecationWarning)
            # An integer weekday is looked up in the MO..SU singletons.
            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday
            # Convert yearday / nlyearday into month/day (+leapdays) form.
            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Cumulative day counts at the end of each month (leap form).
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)
        self._fix()
    def _fix(self):
        """Carry overflow in each relative unit into the next-larger unit.

        Works on magnitudes (via _sign) so the carry keeps the original
        sign; also refreshes the _has_time flag.
        """
        if abs(self.microseconds) > 999999:
            s = _sign(self.microseconds)
            div, mod = divmod(self.microseconds * s, 1000000)
            self.microseconds = mod * s
            self.seconds += div * s
        if abs(self.seconds) > 59:
            s = _sign(self.seconds)
            div, mod = divmod(self.seconds * s, 60)
            self.seconds = mod * s
            self.minutes += div * s
        if abs(self.minutes) > 59:
            s = _sign(self.minutes)
            div, mod = divmod(self.minutes * s, 60)
            self.minutes = mod * s
            self.hours += div * s
        if abs(self.hours) > 23:
            s = _sign(self.hours)
            div, mod = divmod(self.hours * s, 24)
            self.hours = mod * s
            self.days += div * s
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years += div * s
        if (self.hours or self.minutes or self.seconds or self.microseconds
                or self.hour is not None or self.minute is not None or
                self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0
    @property
    def weeks(self):
        # Whole weeks contained in the days component.
        return self.days // 7
    @weeks.setter
    def weeks(self, value):
        # Replace the current whole-week portion of days, keeping leftovers.
        self.days = self.days - (self.weeks * 7) + value * 7
    def _set_months(self, months):
        """Set the months component, normalizing overflow into years."""
        self.months = months
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years = div * s
        else:
            self.years = 0
    def normalized(self):
        """
        Return a version of this object represented entirely using integer
        values for the relative attributes.
        >>> relativedelta(days=1.5, hours=2).normalized()
        relativedelta(days=1, hours=14)
        :return:
            Returns a :class:`dateutil.relativedelta.relativedelta` object.
        """
        # Cascade remainders down (rounding each to roughly nearest microsecond)
        days = int(self.days)
        hours_f = round(self.hours + 24 * (self.days - days), 11)
        hours = int(hours_f)
        minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
        minutes = int(minutes_f)
        seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
        seconds = int(seconds_f)
        microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
        # Constructor carries overflow back up with call to _fix()
        return self.__class__(years=self.years, months=self.months,
                              days=days, hours=hours, minutes=minutes,
                              seconds=seconds, microseconds=microseconds,
                              leapdays=self.leapdays, year=self.year,
                              month=self.month, day=self.day,
                              weekday=self.weekday, hour=self.hour,
                              minute=self.minute, second=self.second,
                              microsecond=self.microsecond)
    def __add__(self, other):
        """Add a relativedelta, timedelta or date/datetime to self."""
        if isinstance(other, relativedelta):
            # Relative components sum; absolute components prefer *other*.
            return self.__class__(years=other.years + self.years,
                                  months=other.months + self.months,
                                  days=other.days + self.days,
                                  hours=other.hours + self.hours,
                                  minutes=other.minutes + self.minutes,
                                  seconds=other.seconds + self.seconds,
                                  microseconds=(other.microseconds +
                                                self.microseconds),
                                  leapdays=other.leapdays or self.leapdays,
                                  year=other.year or self.year,
                                  month=other.month or self.month,
                                  day=other.day or self.day,
                                  weekday=other.weekday or self.weekday,
                                  hour=other.hour or self.hour,
                                  minute=other.minute or self.minute,
                                  second=other.second or self.second,
                                  microsecond=(other.microsecond or
                                               self.microsecond))
        if isinstance(other, datetime.timedelta):
            return self.__class__(years=self.years,
                                  months=self.months,
                                  days=self.days + other.days,
                                  hours=self.hours,
                                  minutes=self.minutes,
                                  seconds=self.seconds + other.seconds,
                                  microseconds=self.microseconds + other.microseconds,
                                  leapdays=self.leapdays,
                                  year=self.year,
                                  month=self.month,
                                  day=self.day,
                                  weekday=self.weekday,
                                  hour=self.hour,
                                  minute=self.minute,
                                  second=self.second,
                                  microsecond=self.microsecond)
        if not isinstance(other, datetime.date):
            return NotImplemented
        elif self._has_time and not isinstance(other, datetime.datetime):
            # Promote a bare date when time components would otherwise be lost.
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the length of the resulting month.
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            # Jump forward/backward to the Nth requested weekday.
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth) - 1) * 7
            if nth > 0:
                jumpdays += (7 - ret.weekday() + weekday) % 7
            else:
                jumpdays += (ret.weekday() - weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret
    def __radd__(self, other):
        return self.__add__(other)
    def __rsub__(self, other):
        return self.__neg__().__radd__(other)
    def __sub__(self, other):
        if not isinstance(other, relativedelta):
            return NotImplemented   # In case the other object defines __rsub__
        return self.__class__(years=self.years - other.years,
                              months=self.months - other.months,
                              days=self.days - other.days,
                              hours=self.hours - other.hours,
                              minutes=self.minutes - other.minutes,
                              seconds=self.seconds - other.seconds,
                              microseconds=self.microseconds - other.microseconds,
                              leapdays=self.leapdays or other.leapdays,
                              year=self.year or other.year,
                              month=self.month or other.month,
                              day=self.day or other.day,
                              weekday=self.weekday or other.weekday,
                              hour=self.hour or other.hour,
                              minute=self.minute or other.minute,
                              second=self.second or other.second,
                              microsecond=self.microsecond or other.microsecond)
    def __neg__(self):
        # Negate relative components; absolute components are kept as-is.
        return self.__class__(years=-self.years,
                              months=-self.months,
                              days=-self.days,
                              hours=-self.hours,
                              minutes=-self.minutes,
                              seconds=-self.seconds,
                              microseconds=-self.microseconds,
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)
    def __bool__(self):
        # True when any relative component is non-zero or any absolute
        # component is set.
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Compatibility with Python 2.x
    __nonzero__ = __bool__
    def __mul__(self, other):
        """Scale the relative components by a number (truncating to int)."""
        try:
            f = float(other)
        except TypeError:
            return NotImplemented
        return self.__class__(years=int(self.years * f),
                              months=int(self.months * f),
                              days=int(self.days * f),
                              hours=int(self.hours * f),
                              minutes=int(self.minutes * f),
                              seconds=int(self.seconds * f),
                              microseconds=int(self.microseconds * f),
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)
    __rmul__ = __mul__
    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return NotImplemented
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # An unset n and n == 1 are considered equivalent.
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.microseconds == other.microseconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)
    # Mutable and equality-comparable, therefore unhashable.
    __hash__ = None
    def __ne__(self, other):
        return not self.__eq__(other)
    def __div__(self, other):
        try:
            reciprocal = 1 / float(other)
        except TypeError:
            return NotImplemented
        return self.__mul__(reciprocal)
    __truediv__ = __div__
    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("{attr}={value:+g}".format(attr=attr, value=value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("{attr}={value}".format(attr=attr, value=repr(value)))
        return "{classname}({attrs})".format(classname=self.__class__.__name__,
                                             attrs=", ".join(l))
def _sign(x):
return int(copysign(1, x))
# vim:ts=4:sw=4:et
| |
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import units
from hyperv.nova import constants
from hyperv.nova import vhdutils
from hyperv.nova import vmutils
from hyperv.tests import test
class VHDUtilsBaseTestCase(test.NoDBTestCase):
    """Base class for the Hyper-V VHD Utils test classes.

    Provides the fake paths, sizes and WMI-style XML payload shared by the
    concrete VHD/VHDX utility test cases.
    """
    _FAKE_VHD_PATH = "C:\\fake_path.vhdx"
    _FAKE_PARENT_PATH = "C:\\fake_parent_path.vhdx"
    _FAKE_FORMAT = 3
    _FAKE_TYPE = 3
    _FAKE_MAX_INTERNAL_SIZE = units.Gi
    # Plain int literal: the former 2097152L long suffix is Python 2 only
    # (a syntax error on Python 3); ints/longs are unified on Python 2.
    _FAKE_DYNAMIC_BLK_SIZE = 2097152
    _FAKE_BAD_TYPE = 5
    _FAKE_JOB_PATH = 'fake_job_path'
    _FAKE_RET_VAL = 0
    # GetVirtualHardDiskInfo-style XML blob, parameterized with the fake
    # values above.
    _FAKE_VHD_INFO_XML = (
        """<INSTANCE CLASSNAME="Msvm_VirtualHardDiskSettingData">
<PROPERTY NAME="BlockSize" TYPE="uint32">
<VALUE>33554432</VALUE>
</PROPERTY>
<PROPERTY NAME="Caption" TYPE="string">
<VALUE>Virtual Hard Disk Setting Data</VALUE>
</PROPERTY>
<PROPERTY NAME="Description" TYPE="string">
<VALUE>Setting Data for a Virtual Hard Disk.</VALUE>
</PROPERTY>
<PROPERTY NAME="ElementName" TYPE="string">
<VALUE>fake_path.vhdx</VALUE>
</PROPERTY>
<PROPERTY NAME="Format" TYPE="uint16">
<VALUE>%(format)s</VALUE>
</PROPERTY>
<PROPERTY NAME="InstanceID" TYPE="string">
<VALUE>52794B89-AC06-4349-AC57-486CAAD52F69</VALUE>
</PROPERTY>
<PROPERTY NAME="LogicalSectorSize" TYPE="uint32">
<VALUE>4096</VALUE>
</PROPERTY>
<PROPERTY NAME="MaxInternalSize" TYPE="uint64">
<VALUE>%(max_internal_size)s</VALUE>
</PROPERTY>
<PROPERTY NAME="ParentPath" TYPE="string">
<VALUE>%(parent_path)s</VALUE>
</PROPERTY>
<PROPERTY NAME="Path" TYPE="string">
<VALUE>%(path)s</VALUE>
</PROPERTY>
<PROPERTY NAME="PhysicalSectorSize" TYPE="uint32">
<VALUE>4096</VALUE>
</PROPERTY>
<PROPERTY NAME="Type" TYPE="uint16">
<VALUE>%(type)s</VALUE>
</PROPERTY>
</INSTANCE>""" % {'path': _FAKE_VHD_PATH,
                  'parent_path': _FAKE_PARENT_PATH,
                  'format': _FAKE_FORMAT,
                  'max_internal_size': _FAKE_MAX_INTERNAL_SIZE,
                  'type': _FAKE_TYPE})
class VHDUtilsTestCase(VHDUtilsBaseTestCase):
    """Unit tests for the Hyper-V VHDUtils class."""
    def setUp(self):
        super(VHDUtilsTestCase, self).setUp()
        self._vhdutils = vhdutils.VHDUtils()
        # Replace the WMI connection and vmutils with mocks so no Hyper-V
        # host is required to run these tests.
        self._vhdutils._conn = mock.MagicMock()
        self._vhdutils._vmutils = mock.MagicMock()
        # Expected result of get_vhd_info() when fed _FAKE_VHD_INFO_XML.
        self._fake_vhd_info = {
            'ParentPath': self._FAKE_PARENT_PATH,
            'MaxInternalSize': self._FAKE_MAX_INTERNAL_SIZE,
            'Type': self._FAKE_TYPE}
    def test_validate_vhd(self):
        """validate_vhd() delegates to the WMI ValidateVirtualHardDisk call."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.ValidateVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
        self._vhdutils.validate_vhd(self._FAKE_VHD_PATH)
        mock_img_svc.ValidateVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH)
    def test_get_vhd_info(self):
        """get_vhd_info() parses the WMI XML into the expected dict."""
        self._mock_get_vhd_info()
        vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
        self.assertEqual(self._fake_vhd_info, vhd_info)
    def _mock_get_vhd_info(self):
        # Make the mocked image service return the canned XML payload.
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.GetVirtualHardDiskInfo.return_value = (
            self._FAKE_VHD_INFO_XML, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
    def test_create_dynamic_vhd(self):
        """create_dynamic_vhd() issues CreateDynamicVirtualHardDisk and checks the job."""
        self._vhdutils.get_vhd_info = mock.MagicMock(
            return_value={'Format': self._FAKE_FORMAT})
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.CreateDynamicVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
        self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
                                          self._FAKE_MAX_INTERNAL_SIZE,
                                          constants.DISK_FORMAT_VHD)
        mock_img_svc.CreateDynamicVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH,
            MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
    def test_reconnect_parent_vhd(self):
        """reconnect_parent_vhd() forces a re-link of child to parent image."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.ReconnectParentVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
        self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH,
                                            self._FAKE_PARENT_PATH)
        mock_img_svc.ReconnectParentVirtualHardDisk.assert_called_once_with(
            ChildPath=self._FAKE_VHD_PATH,
            ParentPath=self._FAKE_PARENT_PATH,
            Force=True)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
    def test_merge_vhd(self):
        """merge_vhd() maps to MergeVirtualHardDisk with source/destination."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.MergeVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
        self._vhdutils.merge_vhd(self._FAKE_VHD_PATH, self._FAKE_VHD_PATH)
        mock_img_svc.MergeVirtualHardDisk.assert_called_once_with(
            SourcePath=self._FAKE_VHD_PATH,
            DestinationPath=self._FAKE_VHD_PATH)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
    def test_resize_vhd(self):
        """resize_vhd() expands the image to the computed internal size."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.ExpandVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
        self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
            return_value=self._FAKE_MAX_INTERNAL_SIZE)
        self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
                                  self._FAKE_MAX_INTERNAL_SIZE)
        mock_img_svc.ExpandVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH,
            MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
    def _mocked_get_internal_vhd_size(self, root_vhd_size, vhd_type):
        # Helper: run get_internal_vhd_size_by_file_size() with the VHD type
        # and dynamic block size patched to canned values.
        mock_get_vhd_info = mock.MagicMock(return_value={'Type': vhd_type})
        mock_get_blk_size = mock.MagicMock(
            return_value=self._FAKE_DYNAMIC_BLK_SIZE)
        with mock.patch.multiple(self._vhdutils,
                                 get_vhd_info=mock_get_vhd_info,
                                 _get_vhd_dynamic_blk_size=mock_get_blk_size):
            return self._vhdutils.get_internal_vhd_size_by_file_size(
                None, root_vhd_size)
    def test_create_differencing_vhd(self):
        """create_differencing_vhd() maps to CreateDifferencingVirtualHardDisk."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.CreateDifferencingVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
        self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
                                               self._FAKE_PARENT_PATH)
        mock_img_svc.CreateDifferencingVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH,
            ParentPath=self._FAKE_PARENT_PATH)
    def test_get_internal_vhd_size_by_file_size_fixed(self):
        """Fixed VHD: internal size is file size minus the 512-byte footer."""
        root_vhd_size = 1 * 1024 ** 3
        real_size = self._mocked_get_internal_vhd_size(
            root_vhd_size, constants.VHD_TYPE_FIXED)
        expected_vhd_size = 1 * 1024 ** 3 - 512
        self.assertEqual(expected_vhd_size, real_size)
    def test_get_internal_vhd_size_by_file_size_dynamic(self):
        """Dynamic VHD: internal size accounts for header/footer/BAT overhead."""
        root_vhd_size = 20 * 1024 ** 3
        real_size = self._mocked_get_internal_vhd_size(
            root_vhd_size, constants.VHD_TYPE_DYNAMIC)
        expected_vhd_size = 20 * 1024 ** 3 - 43008
        self.assertEqual(expected_vhd_size, real_size)
    def test_get_internal_vhd_size_by_file_size_differencing(self):
        # For differencing images, the internal size of the parent vhd
        # is returned
        vhdutil = vhdutils.VHDUtils()
        root_vhd_size = 20 * 1024 ** 3
        vhdutil.get_vhd_info = mock.MagicMock()
        vhdutil.get_vhd_parent_path = mock.MagicMock()
        vhdutil.get_vhd_parent_path.return_value = self._FAKE_VHD_PATH
        # First call sees the differencing image (type 4), second the
        # dynamic parent.
        vhdutil.get_vhd_info.side_effect = [
            {'Type': 4}, {'Type': constants.VHD_TYPE_DYNAMIC}]
        vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
        vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152
        real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
                                                               root_vhd_size)
        expected_vhd_size = 20 * 1024 ** 3 - 43008
        self.assertEqual(expected_vhd_size, real_size)
    def test_get_vhd_format_vhdx(self):
        """A VHDX signature at the start of the file is detected as VHDX."""
        with mock.patch('hyperv.nova.vhdutils.open',
                        mock.mock_open(read_data=vhdutils.VHDX_SIGNATURE),
                        create=True):
            format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
        self.assertEqual(constants.DISK_FORMAT_VHDX, format)
    def test_get_vhd_format_vhd(self):
        """A VHD signature found on the second read is detected as VHD."""
        with mock.patch('hyperv.nova.vhdutils.open',
                        mock.mock_open(),
                        create=True) as mock_open:
            f = mock_open.return_value
            f.tell.return_value = 1024
            # First read misses the signature, the second one returns it.
            readdata = ['notthesig', vhdutils.VHD_SIGNATURE]
            def read(*args):
                for content in readdata:
                    yield content
            f.read.side_effect = read()
            format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
        self.assertEqual(constants.DISK_FORMAT_VHD, format)
    def test_get_vhd_format_invalid_format(self):
        """Unknown signatures raise HyperVException."""
        with mock.patch('hyperv.nova.vhdutils.open',
                        mock.mock_open(read_data='invalid'),
                        create=True) as mock_open:
            f = mock_open.return_value
            f.tell.return_value = 1024
            self.assertRaises(vmutils.HyperVException,
                              self._vhdutils.get_vhd_format,
                              self._FAKE_VHD_PATH)
    def test_get_vhd_format_zero_length_file(self):
        """An empty file raises HyperVException after seeking to its end."""
        with mock.patch('hyperv.nova.vhdutils.open',
                        mock.mock_open(read_data=''),
                        create=True) as mock_open:
            f = mock_open.return_value
            f.tell.return_value = 0
            self.assertRaises(vmutils.HyperVException,
                              self._vhdutils.get_vhd_format,
                              self._FAKE_VHD_PATH)
            f.seek.assert_called_once_with(0, 2)
    def test_get_supported_vhd_format(self):
        """The WMI v1 API's best supported image format is plain VHD."""
        fmt = self._vhdutils.get_best_supported_vhd_format()
        self.assertEqual(constants.DISK_FORMAT_VHD, fmt)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.